code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowercase : Tuple = { "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = ["BloomTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Tuple = [ "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST", "BloomForCausalLM", "BloomModel", "BloomPreTrainedModel", "BloomForSequenceClassification", "BloomForTokenClassification", "BloomForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys _lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
715
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : List[Any] = """ylacombe/bark-small""" lowercase_ : List[str] = tempfile.mkdtemp() lowercase_ : Tuple = """en_speaker_1""" lowercase_ : Union[str, Any] = """This is a test string""" lowercase_ : int = """speaker_embeddings_path.json""" lowercase_ : Any = """speaker_embeddings""" def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : Optional[int] ): return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Any = self.get_tokenizer() lowercase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ ) processor.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Any = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) lowercase_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowercase_ : Optional[Any] = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : 
Optional[int] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) lowercase_ : Optional[int] = 35 lowercase_ : int = 2 lowercase_ : Union[str, Any] = 8 lowercase_ : Union[str, Any] = { """semantic_prompt""": np.ones(lowercase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset lowercase_ : str = processor(text=self.input_string , voice_preset=lowercase_ ) lowercase_ : Dict = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file lowercase_ : Any = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(lowercase_ , **lowercase_ ) lowercase_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowercase_ ) lowercase_ : List[Any] = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub lowercase_ : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : List[str] = self.get_tokenizer() lowercase_ : int = BarkProcessor(tokenizer=lowercase_ ) lowercase_ : Any = processor(text=self.input_string ) lowercase_ : List[str] = tokenizer( self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
30
0
'''simple docstring''' import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class __magic_name__ ( __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, unittest.TestCase): UpperCamelCase__ = StableUnCLIPPipeline UpperCamelCase__ = TEXT_TO_IMAGE_PARAMS UpperCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS UpperCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS UpperCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Any = 32 lowercase_ : int = embedder_hidden_size # prior components torch.manual_seed(0 ) lowercase_ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) lowercase_ : Any = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase_ , projection_dim=lowerCAmelCase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) lowercase_ : Any = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowerCAmelCase_ , num_layers=1 , ) torch.manual_seed(0 ) lowercase_ : List[Any] = DDPMScheduler( 
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=lowerCAmelCase_ , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , ) # regular denoising components torch.manual_seed(0 ) lowercase_ : Optional[int] = StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase_ ) lowercase_ : List[str] = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) lowercase_ : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) lowercase_ : List[str] = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) lowercase_ : List[Any] = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase_ , layers_per_block=1 , upcast_attention=lowerCAmelCase_ , use_linear_projection=lowerCAmelCase_ , ) torch.manual_seed(0 ) lowercase_ : Dict = DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="""v_prediction""" , set_alpha_to_one=lowerCAmelCase_ , steps_offset=1 , ) torch.manual_seed(0 ) lowercase_ : str = AutoencoderKL() lowercase_ : Any = { # prior components """prior_tokenizer""": prior_tokenizer, """prior_text_encoder""": prior_text_encoder, """prior""": prior, """prior_scheduler""": prior_scheduler, # image noising components """image_normalizer""": image_normalizer, """image_noising_scheduler""": image_noising_scheduler, # regular denoising components 
"""tokenizer""": tokenizer, """text_encoder""": text_encoder, """unet""": unet, """scheduler""": scheduler, """vae""": vae, } return components def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] , lowercase_ : List[str]=0 ): if str(lowerCAmelCase_ ).startswith("""mps""" ): lowercase_ : Dict = torch.manual_seed(lowerCAmelCase_ ) else: lowercase_ : Union[str, Any] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ ) lowercase_ : Union[str, Any] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """prior_num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Optional[Any] = torch_device == """cpu""" self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Tuple = torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase_ ) @slow @require_torch_gpu class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Dict ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" ) lowercase_ : Tuple = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowercase_ : Any = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowercase_ : Optional[int] = pipe("""anime turle""" , 
generator=lowerCAmelCase_ , output_type="""np""" ) lowercase_ : Dict = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase_ : str = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) lowercase_ : Union[str, Any] = pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowercase_ : List[Any] = pipe( """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , ) lowercase_ : int = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
716
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=_UpperCAmelCase) class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = field(default='''image-classification''', metadata={'''include_in_asdict_even_if_is_default''': True}) UpperCamelCase__ = Features({'''image''': Image()}) UpperCamelCase__ = Features({'''labels''': ClassLabel}) UpperCamelCase__ = "image" UpperCamelCase__ = "labels" def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ): if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , lowercase_ ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) lowercase_ : List[str] = copy.deepcopy(self ) lowercase_ : List[str] = self.label_schema.copy() lowercase_ : List[Any] = features[self.label_column] lowercase_ : Optional[Any] = label_schema return task_template @property def SCREAMING_SNAKE_CASE_ ( self : int ): return { self.image_column: "image", self.label_column: "labels", }
30
0
'''simple docstring''' from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
717
'''simple docstring''' import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ) -> List[Any]: lowercase_ : str = 1.5 lowercase_ : List[Any] = int(factor * num_class_images ) lowercase_ : int = ClipClient( url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 ) os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCAmelCase__ ) if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images: return while True: lowercase_ : List[str] = client.query(text=UpperCAmelCase__ ) if len(UpperCAmelCase__ ) >= factor * num_class_images or num_images > 1e4: break else: lowercase_ : List[str] = int(factor * num_images ) lowercase_ : List[str] = ClipClient( url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 , ) lowercase_ : List[str] = 0 lowercase_ : Dict = 0 lowercase_ : Tuple = tqdm(desc="""downloading real regularization images""" , total=UpperCAmelCase__ ) with open(F'''{class_data_dir}/caption.txt''' , """w""" ) as fa, open(F'''{class_data_dir}/urls.txt''' , """w""" ) as fa, open( F'''{class_data_dir}/images.txt''' , """w""" ) as fa: while total < num_class_images: lowercase_ : str = class_images[count] count += 1 try: lowercase_ : Union[str, Any] = requests.get(images["""url"""] ) if img.status_code == 200: lowercase_ : List[str] = Image.open(BytesIO(img.content ) ) with open(F'''{class_data_dir}/images/{total}.jpg''' , """wb""" ) as f: f.write(img.content ) fa.write(images["""caption"""] + """\n""" ) fa.write(images["""url"""] + """\n""" ) fa.write(F'''{class_data_dir}/images/{total}.jpg''' + """\n""" ) total += 1 pbar.update(1 ) else: continue except Exception: 
continue return def lowerCamelCase ( ) -> Optional[Any]: lowercase_ : Any = argparse.ArgumentParser("""""" , add_help=UpperCAmelCase__ ) parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ ) parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ ) parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=UpperCAmelCase__ ) return parser.parse_args() if __name__ == "__main__": _lowercase : Dict = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
30
0
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase : List[str] = logging.get_logger(__name__) _lowercase : Any = { """RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""", } class __magic_name__ ( SCREAMING_SNAKE_CASE__): UpperCamelCase__ = '''mvp''' UpperCamelCase__ = ['''past_key_values'''] UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : int , lowercase_ : Any=50267 , lowercase_ : Optional[Any]=1024 , lowercase_ : Optional[Any]=12 , lowercase_ : List[Any]=4096 , lowercase_ : Dict=16 , lowercase_ : List[Any]=12 , lowercase_ : List[str]=4096 , lowercase_ : str=16 , lowercase_ : str=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : List[str]=1024 , lowercase_ : Optional[Any]=0.1 , lowercase_ : List[Any]=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Optional[int]=0.0 , lowercase_ : Any=False , lowercase_ : List[Any]=True , lowercase_ : List[Any]=1 , lowercase_ : Tuple=0 , lowercase_ : List[str]=2 , lowercase_ : str=True , lowercase_ : Union[str, Any]=2 , lowercase_ : str=2 , lowercase_ : Any=False , lowercase_ : List[str]=100 , lowercase_ : Tuple=800 , **lowercase_ : Optional[Any] , ): lowercase_ : Union[str, Any] = vocab_size lowercase_ : List[str] = max_position_embeddings lowercase_ : Dict = d_model lowercase_ : List[str] = encoder_ffn_dim lowercase_ : Dict = encoder_layers lowercase_ : Union[str, Any] = encoder_attention_heads lowercase_ : Any = decoder_ffn_dim lowercase_ : Union[str, Any] = decoder_layers lowercase_ : Union[str, Any] = decoder_attention_heads lowercase_ : Dict = dropout lowercase_ : Tuple = attention_dropout lowercase_ : int = activation_dropout lowercase_ : str = activation_function lowercase_ : str = init_std lowercase_ : Any = encoder_layerdrop lowercase_ : int = 
decoder_layerdrop lowercase_ : Union[str, Any] = classifier_dropout lowercase_ : Optional[int] = use_cache lowercase_ : List[str] = encoder_layers lowercase_ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True lowercase_ : str = use_prompt lowercase_ : Optional[int] = prompt_length lowercase_ : Optional[int] = prompt_mid_dim super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , ) if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , snake_case__ ): lowercase_ : Optional[int] = self.bos_token_id warnings.warn( f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ''' """The config can simply be saved and uploaded again to be fixed.""" )
718
'''simple docstring''' from __future__ import annotations def lowerCamelCase ( UpperCAmelCase__ : list , UpperCAmelCase__ : int | None = None , UpperCAmelCase__ : int | None = None ) -> None: if start is None: lowercase_ : Any = 0 if end is None: lowercase_ : List[Any] = len(UpperCAmelCase__ ) - 1 if start >= end: return lowercase_ : Optional[int] = (start + end) // 2 slowsort(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) slowsort(UpperCAmelCase__ , mid + 1 , UpperCAmelCase__ ) if sequence[end] < sequence[mid]: lowercase_ , lowercase_ : Dict = sequence[mid], sequence[end] slowsort(UpperCAmelCase__ , UpperCAmelCase__ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
30
0
'''simple docstring''' def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] ) -> Optional[Any]: lowercase_ : List[str] = [0 for i in range(r + 1 )] # nc0 = 1 lowercase_ : Dict = 1 for i in range(1 , n + 1 ): # to compute current row from previous row. lowercase_ : Union[str, Any] = min(__a , __a ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=10, r=5))
719
'''simple docstring''' import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline _lowercase : Optional[Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False) parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not") parser.add_argument("--steps", default=None, type=int, help="Num inference steps") _lowercase : Dict = parser.parse_args() _lowercase : Dict = "cpu" _lowercase : str = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings" _lowercase : Any = "path-to-your-trained-model" _lowercase : str = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: _lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) _lowercase : Any = pipe.to(device) # to channels last _lowercase : Union[str, Any] = pipe.unet.to(memory_format=torch.channels_last) _lowercase : List[Any] = pipe.vae.to(memory_format=torch.channels_last) _lowercase : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: _lowercase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex _lowercase : int = torch.randn(2, 4, 64, 64) _lowercase : int = torch.rand(1) * 999 _lowercase : Union[str, Any] = torch.randn(2, 77, 768) _lowercase : Optional[Any] = (sample, timestep, encoder_hidden_status) try: _lowercase : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: _lowercase : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) _lowercase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) _lowercase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: _lowercase : int = 
ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute _lowercase : int = 666 _lowercase : Any = torch.Generator(device).manual_seed(seed) _lowercase : int = {"generator": generator} if args.steps is not None: _lowercase : Optional[int] = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): _lowercase : List[Any] = pipe(prompt, **generate_kwargs).images[0] # save image image.save("generated.png")
30
0
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowercase : Optional[Any] = { """configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""], """tokenization_cpmant""": ["""CpmAntTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[Any] = [ """CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""", """CpmAntForCausalLM""", """CpmAntModel""", """CpmAntPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys _lowercase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
720
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _lowercase : Optional[Any] = { "configuration_swiftformer": [ "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwiftFormerConfig", "SwiftFormerOnnxConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = [ "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "SwiftFormerForImageClassification", "SwiftFormerModel", "SwiftFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys _lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
from __future__ import annotations def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : list[str] | None = None ) -> list[list[str]]: lowercase_ : Any = word_bank or [] # create a table lowercase_ : int = len(UpperCAmelCase__ ) + 1 lowercase_ : list[list[list[str]]] = [] for _ in range(UpperCAmelCase__ ): table.append([] ) # seed value lowercase_ : int = [[]] # because empty string has empty combination # iterate through the indices for i in range(UpperCAmelCase__ ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(UpperCAmelCase__ )] == word: lowercase_ : list[list[str]] = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(UpperCAmelCase__ )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(UpperCAmelCase__ )]: combination.reverse() return table[len(UpperCAmelCase__ )] if __name__ == "__main__": print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"])) print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"])) print( all_construct( "hexagonosaurus", ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"], ) )
721
'''simple docstring''' import unittest import numpy as np def lowerCamelCase ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray | None = None , ) -> np.ndarray: lowercase_ : List[Any] = np.shape(UpperCAmelCase__ ) lowercase_ : Dict = np.shape(UpperCAmelCase__ ) lowercase_ : int = np.shape(UpperCAmelCase__ ) if shape_a[0] != shape_b[0]: lowercase_ : Optional[int] = ( """Expected the same number of rows for A and B. """ F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(UpperCAmelCase__ ) if shape_b[1] != shape_c[1]: lowercase_ : Optional[Any] = ( """Expected the same number of columns for B and C. """ F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(UpperCAmelCase__ ) lowercase_ : Any = pseudo_inv if a_inv is None: try: lowercase_ : List[str] = np.linalg.inv(UpperCAmelCase__ ) except np.linalg.LinAlgError: raise ValueError( """Input matrix A is not invertible. 
Cannot compute Schur complement.""" ) return mat_c - mat_b.T @ a_inv @ mat_b class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Dict = np.array([[2, 1], [6, 3]] ) lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ ) lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] ) lowercase_ : Optional[int] = np.linalg.det(lowercase_ ) lowercase_ : int = np.linalg.det(lowercase_ ) lowercase_ : int = np.linalg.det(lowercase_ ) self.assertAlmostEqual(lowercase_ , det_a * det_s ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(lowercase_ ): schur_complement(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(lowercase_ ): schur_complement(lowercase_ , lowercase_ , lowercase_ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
30
0
'''simple docstring'''
from typing import List, Optional, Union

import numpy as np
import tensorflow as tf

from .utils import logging


# NOTE(review): identifiers below were mangled by obfuscation (all defs shared one
# name and bodies referenced undefined `_lowerCAmelCase`); names are restored to the
# ones the internal call sites (`shape_list`, `HDF5_OBJECT_HEADER_LIMIT`) demand.
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Return the shape of `tensor`, preferring static dimensions where known.

    Unknown (None) static dimensions are replaced by the corresponding entry of
    `tf.shape(tensor)` so the result stays usable inside a `tf.function`.
    """
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    # Fully-unknown static shape: only the dynamic shape tensor is available.
    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """Thin wrapper around `tf.nn.softmax`.

    The tiny additive constant keeps the op well-behaved on some XLA/accelerator
    configurations while leaving results effectively unchanged.
    """
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    """Simplified functional layer norm, duplicating `torch.nn.functional.layer_norm`.

    Only 1D `weight`/`bias` tensors and a single integer `axis` are supported.
    """
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized.
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1
        # dimensions on every dimension except `axis`.
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    """Replicate the behavior of `torch.flatten` in TF."""
    # If end_dim or start_dim is negative, count them from the end.
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask):
    """Turn a 2D/3D keep(1)/mask(0) attention mask into a broadcastable additive mask.

    Masked positions end up at the dtype's most negative value, kept positions at 0.
    """
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs

    # NOTE(review): assumes rank is 2 or 3 — any other rank leaves the extended
    # mask unbound, exactly as in the original code.
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    """Assert every id in `tensor` is a valid row index for an embedding matrix of size `embed_dim`."""
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    """Save `data` as attribute `name` on HDF5 `group`, chunking when the header limit would be exceeded."""
    # HDF5 attribute headers are limited in size; keep a safety margin below 64 KB.
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`,
    # because in that case even chunking the array would not make saving possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    """Load attribute `name` from HDF5 `group`, reassembling chunked attributes saved above."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """Expand every rank-1 `tf.Tensor` in a nested structure to rank 2 (trailing axis of size 1)."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
700
'''simple docstring'''
# Restored name: both function bodies index into `B64_CHARSET`, but the constant
# had been mangled to `_lowercase`, which made every call raise NameError.
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encode `data` to Base64, mirroring `base64.b64encode`.

    Raises:
        TypeError: if `data` is not a bytes-like object.
    """
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    # One 8-bit group per input byte (the original passed the whole buffer to
    # `bin()` instead of each byte — fixed).
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to
        # make its length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decode Base64 `encoded_data`, mirroring `base64.b64decode`.

    Raises:
        TypeError: if `encoded_data` is neither str nor bytes.
        ValueError: if a bytes input is not ASCII-decodable.
        AssertionError: on invalid characters or incorrect padding (kept
            assert-based, as in the original implementation).
    """
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        # Each character contributes 6 bits; drop the 2*padding filler bits.
        # (The original passed the whole string to `B64_CHARSET.index` instead
        # of each character — fixed.)
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)


# Both functions were originally defined under the same name `lowerCamelCase`,
# so only the decoder was reachable; keep that binding for backward compatibility.
lowerCamelCase = base64_decode


if __name__ == "__main__":
    import doctest

    doctest.testmod()
30
0
'''simple docstring''' def lowerCamelCase ( UpperCAmelCase__ : int ) -> str: if length <= 0 or not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError("""Length must be a positive integer.""" ) return [n * (2 * n - 1) for n in range(lowerCamelCase_ )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=10))
701
'''simple docstring''' import argparse _lowercase : Optional[int] = "docs/source/_static/js/custom.js" def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Dict: with open(UpperCAmelCase__ , encoding="""utf-8""" , newline="""\n""" ) as f: lowercase_ : Optional[int] = f.readlines() lowercase_ : Tuple = 0 # First let's put the right version while not lines[index].startswith("""const stableVersion =""" ): index += 1 lowercase_ : Optional[Any] = F'''const stableVersion = "v{version}"\n''' # Then update the dictionary while not lines[index].startswith("""const versionMapping = {""" ): index += 1 # We go until the end while not lines[index].startswith("""}""" ): index += 1 # We add the new version at the end lines[index - 1] += F''' "v{version}": "v{version}",\n''' with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(UpperCAmelCase__ ) if __name__ == "__main__": _lowercase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--version", help="Release version.") _lowercase : Dict = parser.parse_args() update_custom_js(args.version)
30
0
'''simple docstring''' from math import pow def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , ) -> tuple[int, int]: if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count lowercase_ : List[str] = int(pow(UpperCamelCase__ , UpperCamelCase__ ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n lowercase_ , lowercase_ : Union[str, Any] = backtrack( UpperCamelCase__ , UpperCamelCase__ , current_number + 1 , UpperCamelCase__ , UpperCamelCase__ ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. lowercase_ , lowercase_ : Optional[int] = backtrack( UpperCamelCase__ , UpperCamelCase__ , current_number + 1 , UpperCamelCase__ , UpperCamelCase__ ) return current_sum, solutions_count def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] ) -> int: if not (1 <= needed_sum <= 1000 and 2 <= power <= 10): raise ValueError( """Invalid input\n""" """needed_sum must be between 1 and 1000, power between 2 and 10.""" ) return backtrack(UpperCamelCase__ , UpperCamelCase__ , 1 , 0 , 0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
702
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __magic_name__ : def __init__( self : Tuple , lowercase_ : Tuple , lowercase_ : Any=3 , lowercase_ : int=32 , lowercase_ : str=3 , lowercase_ : int=10 , lowercase_ : Any=[8, 16, 32, 64] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Any=True , lowercase_ : int=True , lowercase_ : Any="relu" , lowercase_ : List[Any]=3 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : List[str]=1 , ): lowercase_ : Any = parent lowercase_ : str = batch_size lowercase_ : Any = image_size lowercase_ : Optional[Any] = num_channels lowercase_ : Any = embeddings_size lowercase_ : Union[str, Any] = hidden_sizes lowercase_ : Any = depths lowercase_ : Dict = is_training lowercase_ : Tuple = use_labels lowercase_ : str = hidden_act lowercase_ : Optional[Any] = num_labels lowercase_ : Tuple = scope lowercase_ : Any = len(lowercase_ ) lowercase_ : Optional[Any] = out_features lowercase_ : Tuple = out_indices lowercase_ : str = num_groups def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ : List[Any] = None 
if self.use_labels: lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels ) lowercase_ : int = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] ): lowercase_ : Optional[int] = BitModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : List[Any] = model(lowercase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] ): lowercase_ : Union[str, Any] = self.num_labels lowercase_ : Tuple = BitForImageClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Any = model(lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] ): lowercase_ : Any = BitBackbone(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Dict = model(lowercase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone 
works with out_features=None lowercase_ : List[str] = None lowercase_ : Dict = BitBackbone(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Tuple = model(lowercase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Optional[int] = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs lowercase_ : Any = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () UpperCamelCase__ = ( {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification} if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : int = BitModelTester(self ) lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE_ ( self : List[str] ): return @unittest.skip(reason="""Bit does not output attentions""" ) def 
SCREAMING_SNAKE_CASE_ ( self : Any ): pass @unittest.skip(reason="""Bit does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE_ ( self : Any ): pass @unittest.skip(reason="""Bit does not support input and output embeddings""" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): pass def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Optional[Any] = model_class(lowercase_ ) lowercase_ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : Union[str, Any] = [*signature.parameters.keys()] lowercase_ : Optional[int] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : List[Any] = model_class(config=lowercase_ ) for name, module in model.named_modules(): if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ): lowercase_ : Optional[Any] = model_class(lowercase_ ) model.to(lowercase_ ) 
model.eval() with torch.no_grad(): lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase_ : Optional[int] = self.model_tester.num_stages self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : Dict = ["""preactivation""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: lowercase_ : Union[str, Any] = layer_type lowercase_ : Optional[Any] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : Union[str, Any] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) @unittest.skip(reason="""Bit does not use feedforward chunking""" ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): pass def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def lowerCamelCase ( ) -> Optional[Any]: lowercase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): return ( 
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ ) lowercase_ : int = self.default_image_processor lowercase_ : List[Any] = prepare_img() lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ ) # forward pass with torch.no_grad(): lowercase_ : str = model(**lowercase_ ) # verify the logits lowercase_ : Optional[int] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowercase_ ) lowercase_ : Union[str, Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) ) @require_torch class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = (BitBackbone,) if is_torch_available() else () UpperCamelCase__ = BitConfig UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Union[str, Any] = BitModelTester(self )
30
0
import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class __magic_name__ ( unittest.TestCase): def __init__( self : Dict , lowercase_ : List[str] , lowercase_ : Dict=7 , lowercase_ : Optional[int]=3 , lowercase_ : List[Any]=18 , lowercase_ : List[Any]=30 , lowercase_ : Any=400 , lowercase_ : Optional[Any]=True , lowercase_ : Union[str, Any]=None , lowercase_ : Any=True , ): lowercase_ : Union[str, Any] = size if size is not None else {"height": 18, "width": 18} lowercase_ : str = parent lowercase_ : List[str] = batch_size lowercase_ : Tuple = num_channels lowercase_ : Optional[int] = image_size lowercase_ : List[str] = min_resolution lowercase_ : List[str] = max_resolution lowercase_ : Optional[Any] = do_resize lowercase_ : Optional[Any] = size lowercase_ : List[str] = do_normalize def SCREAMING_SNAKE_CASE_ ( self : Dict ): return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class __magic_name__ ( snake_case__, unittest.TestCase): UpperCamelCase__ = ImageGPTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Tuple = ImageGPTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self : Any ): return self.image_processor_tester.prepare_image_processor_dict() def 
SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase_ , """clusters""" ) ) self.assertTrue(hasattr(lowercase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowercase_ , """size""" ) ) self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) lowercase_ : int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = self.image_processing_class(**self.image_processor_dict ) lowercase_ : int = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , obj[key] ) ) else: self.assertEqual(obj[key] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase_ : List[str] = os.path.join(lowercase_ , """image_processor.json""" ) image_processor_first.to_json_file(lowercase_ ) lowercase_ : Tuple = self.image_processing_class.from_json_file(lowercase_ ).to_dict() lowercase_ : Union[str, Any] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(lowercase_ 
) lowercase_ : Optional[Any] = self.image_processing_class.from_pretrained(lowercase_ ).to_dict() lowercase_ : List[Any] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowercase_ ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): pass def lowerCamelCase ( ) -> Optional[int]: lowercase_ : Any = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" ) lowercase_ : str = Image.open(dataset[4]["""file"""] ) lowercase_ : Any = Image.open(dataset[5]["""file"""] ) lowercase_ : List[str] = [imagea, imagea] return images @require_vision @require_torch class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : List[str] = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) lowercase_ : Optional[Any] = prepare_images() # test non-batched lowercase_ : Any = image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1024) ) lowercase_ : str = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase_ ) # test batched lowercase_ : List[str] = image_processing(lowercase_ , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1024) ) lowercase_ : str = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase_ )
703
'''simple docstring''' from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge _lowercase : Optional[Any] = [ "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the" " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe" " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.", "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal" " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's" " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the" " body.", "Amnesty International releases its annual report on the death penalty. The report catalogs the use of" " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the" " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital" " punishment.", ] _lowercase : List[Any] = [ "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ." " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz" " had informed his Lufthansa training school of an episode of severe depression, airline says .", "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ." " Israel and the United States opposed the move, which could open the door to war crimes investigations against" " Israelis .", "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to" " death . 
Organization claims that governments around the world are using the threat of terrorism to advance" " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death" " sentences up by 28% .", ] def lowerCamelCase ( ) -> List[str]: lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] ) assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] ) assert ( pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean() ) def lowerCamelCase ( ) -> Optional[Any]: lowercase_ : Tuple = """rougeLsum""" lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k] lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k] assert score > score_no_sep def lowerCamelCase ( ) -> List[Any]: lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""] lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ ) lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ ) assert score_sep == score_no_sep def lowerCamelCase ( ) -> Optional[Any]: lowercase_ : Union[str, Any] = [ """Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""", """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""", ] lowercase_ : List[str] = [ """Margot Frank, died in 1945, a month earlier than previously thought.""", """Prosecutor: \"No videos were used in the crash 
investigation\" German papers say they saw a cell phone video of""" """ the final seconds on board Flight 9525.""", ] assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) def lowerCamelCase ( ) -> Union[str, Any]: lowercase_ : Optional[Any] = [ """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """ ] lowercase_ : List[Any] = [ """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .""" ] lowercase_ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""] lowercase_ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""] assert new_score > prev_score def lowerCamelCase ( ) -> Tuple: lowercase_ : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" ) lowercase_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) ) assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : Union[str, Any] = calculate_rouge_path( data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase__ ) assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
30
0
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


logger = logging.get_logger(__name__)

# NOTE(review): the obfuscated original assigned both the logger and this map to
# `_lowercase` (second shadowing the first) and duplicated every parameter name,
# which is a SyntaxError; names below are reconstructed from the assignment
# right-hand sides visible in the body — verify against upstream.
DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class __magic_name__(PretrainedConfig):
    """Configuration for DeBERTa-v2 models (defaults match deberta-v2-xlarge)."""

    # Model-type key; the original mangled the attribute name as well —
    # `model_type` is what `PretrainedConfig` machinery expects. TODO confirm.
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: accept "p2c|c2p"-style strings.
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Pooler size defaults to the hidden size unless overridden via kwargs.
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class __magic_name__(OnnxConfig):  # noqa: F811 — name collision present in the original
    """ONNX export configuration for DeBERTa-v2."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis layout depends on the task; multiple-choice adds a choice axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ):
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        # Models without token-type embeddings must not receive token_type_ids.
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
704
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase : Optional[Any] = logging.get_logger(__name__) _lowercase : Union[str, Any] = { "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''speech_to_text''' UpperCamelCase__ = ['''past_key_values'''] UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : str , lowercase_ : Optional[int]=10000 , lowercase_ : int=12 , lowercase_ : Any=2048 , lowercase_ : Any=4 , lowercase_ : Dict=6 , lowercase_ : Any=2048 , lowercase_ : List[str]=4 , lowercase_ : str=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=True , lowercase_ : int="relu" , lowercase_ : str=256 , lowercase_ : int=0.1 , lowercase_ : int=0.0 , lowercase_ : str=0.0 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=1 , lowercase_ : Dict=0 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=6000 , lowercase_ : Tuple=1024 , lowercase_ : str=2 , lowercase_ : Any=(5, 5) , lowercase_ : Union[str, Any]=1024 , lowercase_ : Dict=80 , lowercase_ : List[Any]=1 , **lowercase_ : int , ): lowercase_ : List[Any] = vocab_size lowercase_ : str = d_model lowercase_ : List[Any] = encoder_ffn_dim lowercase_ : str = encoder_layers lowercase_ : Dict = encoder_attention_heads lowercase_ : str = decoder_ffn_dim lowercase_ : int = decoder_layers lowercase_ : Any = decoder_attention_heads lowercase_ : Any = dropout lowercase_ : Dict = attention_dropout lowercase_ : Optional[int] = activation_dropout lowercase_ : Any = activation_function lowercase_ : Union[str, Any] = init_std lowercase_ : str = encoder_layerdrop lowercase_ : 
Optional[int] = decoder_layerdrop lowercase_ : Dict = use_cache lowercase_ : Union[str, Any] = encoder_layers lowercase_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True lowercase_ : Dict = max_source_positions lowercase_ : Optional[int] = max_target_positions lowercase_ : Tuple = num_conv_layers lowercase_ : Tuple = list(lowercase_ ) lowercase_ : Union[str, Any] = conv_channels lowercase_ : str = input_feat_per_channel lowercase_ : str = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( """Configuration for convolutional module is incorrect. """ """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """ f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ''' f'''`config.num_conv_layers = {self.num_conv_layers}`.''' ) super().__init__( pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
30
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowercase : Optional[Any] = { 'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'], 'tokenization_biogpt': ['BioGptTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] = [ 'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BioGptForCausalLM', 'BioGptForTokenClassification', 'BioGptForSequenceClassification', 'BioGptModel', 'BioGptPreTrainedModel', ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys _lowercase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
705
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __magic_name__ : def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=13 , lowercase_ : List[str]=10 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=2 , lowercase_ : Optional[Any]=2 , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : str=4 , lowercase_ : Dict=37 , lowercase_ : Tuple="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=10 , lowercase_ : Tuple=0.02 , lowercase_ : Any="divided_space_time" , lowercase_ : Tuple=None , ): lowercase_ : int = parent lowercase_ : str = batch_size lowercase_ : List[str] = image_size lowercase_ : str = num_channels lowercase_ : List[Any] = patch_size lowercase_ : Optional[Any] = num_frames lowercase_ : Dict = is_training lowercase_ : int = use_labels lowercase_ : List[str] = hidden_size lowercase_ : Dict = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Any = intermediate_size lowercase_ : Optional[int] = hidden_act lowercase_ 
: Optional[Any] = hidden_dropout_prob lowercase_ : List[Any] = attention_probs_dropout_prob lowercase_ : Any = attention_type lowercase_ : Union[str, Any] = initializer_range lowercase_ : List[str] = scope lowercase_ : Optional[int] = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token lowercase_ : Dict = (image_size // patch_size) ** 2 lowercase_ : List[Any] = (num_frames) * self.num_patches_per_frame + 1 def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Optional[Any] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) lowercase_ : int = None if self.use_labels: lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels ) lowercase_ : Optional[Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : int = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) lowercase_ : Any = self.num_labels return config def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[str] ): lowercase_ : Optional[Any] = TimesformerModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : int = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str ): 
lowercase_ : Dict = TimesformerForVideoClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : int = model(lowercase_ ) # verify the logits shape lowercase_ : List[Any] = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : List[str] = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ : int = config_and_inputs lowercase_ : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () UpperCamelCase__ = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = TimesformerModelTester(self ) lowercase_ : Union[str, Any] = ConfigTester( self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Tuple=False ): lowercase_ : List[Any] = copy.deepcopy(lowercase_ ) if return_labels: if model_class in get_values(lowercase_ ): lowercase_ : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_ ) return inputs_dict def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): pass def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: 
lowercase_ : str = model_class(lowercase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase_ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Dict = model_class(lowercase_ ) lowercase_ : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : Union[str, Any] = [*signature.parameters.keys()] lowercase_ : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Any ): for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : Any = TimesformerModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): if not self.has_attentions: pass else: lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : List[str] = True for model_class in self.all_model_classes: lowercase_ : str = self.model_tester.seq_length lowercase_ : int = self.model_tester.num_frames lowercase_ : int = True lowercase_ : Any = False lowercase_ : str = True lowercase_ : int = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : List[str] = outputs.attentions self.assertEqual(len(lowercase_ ) , 
self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase_ : List[str] = True lowercase_ : str = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : int = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) lowercase_ : Optional[Any] = len(lowercase_ ) # Check attention is always last and order is fine lowercase_ : Tuple = True lowercase_ : Dict = True lowercase_ : str = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) self.assertEqual(out_len + 1 , len(lowercase_ ) ) lowercase_ : Optional[Any] = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict ): lowercase_ : List[str] = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : Dict = outputs.hidden_states lowercase_ : List[Any] = self.model_tester.num_hidden_layers + 1 
self.assertEqual(len(lowercase_ ) , lowercase_ ) lowercase_ : List[Any] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : List[str] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : Optional[int] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) def lowerCamelCase ( ) -> Optional[int]: lowercase_ : List[str] = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) lowercase_ : List[Any] = np.load(UpperCAmelCase__ ) return list(UpperCAmelCase__ ) @require_torch @require_vision class __magic_name__ ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE_ ( self : str ): # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to( lowercase_ ) lowercase_ : Optional[Any] = self.default_image_processor lowercase_ : Any = prepare_video() lowercase_ : Optional[int] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase_ ) # forward pass with torch.no_grad(): lowercase_ : Optional[Any] = model(**lowercase_ ) # verify the logits lowercase_ : Any = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , lowercase_ ) lowercase_ : int = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
30
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available _lowercase : int = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[int] = ["SpeechEncoderDecoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = ["FlaxSpeechEncoderDecoderModel"] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
706
'''simple docstring''' from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig _lowercase : Tuple = logging.get_logger(__name__) # General docstring _lowercase : List[str] = "RegNetConfig" # Base docstring _lowercase : Dict = "facebook/regnet-y-040" _lowercase : Union[str, Any] = [1, 1088, 7, 7] # Image classification docstring _lowercase : Optional[Any] = "facebook/regnet-y-040" _lowercase : Union[str, Any] = "tabby, tabby cat" _lowercase : str = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class __magic_name__ ( nn.Module): def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : int = 1 , lowercase_ : Optional[str] = "relu" , ): super().__init__() lowercase_ : List[Any] = nn.Convad( lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , ) lowercase_ : str = nn.BatchNormad(lowercase_ ) lowercase_ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity() def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] ): lowercase_ : Dict = self.convolution(lowercase_ ) lowercase_ : str = self.normalization(lowercase_ ) lowercase_ : Optional[Any] = self.activation(lowercase_ ) return hidden_state class __magic_name__ ( nn.Module): def __init__( self : List[Any] , lowercase_ : RegNetConfig ): 
super().__init__() lowercase_ : str = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) lowercase_ : Any = config.num_channels def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] ): lowercase_ : List[str] = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" ) lowercase_ : Any = self.embedder(lowercase_ ) return hidden_state class __magic_name__ ( nn.Module): def __init__( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ): super().__init__() lowercase_ : Optional[Any] = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ ) lowercase_ : Union[str, Any] = nn.BatchNormad(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Tensor ): lowercase_ : Tuple = self.convolution(lowercase_ ) lowercase_ : str = self.normalization(lowercase_ ) return hidden_state class __magic_name__ ( nn.Module): def __init__( self : str , lowercase_ : int , lowercase_ : int ): super().__init__() lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) ) lowercase_ : int = nn.Sequential( nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any ): # b c h w -> b c 1 1 lowercase_ : List[str] = self.pooler(lowercase_ ) lowercase_ : Optional[int] = self.attention(lowercase_ ) lowercase_ : Any = hidden_state * attention return hidden_state class __magic_name__ ( nn.Module): def __init__( self : Optional[int] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ): super().__init__() lowercase_ : List[Any] = in_channels != out_channels or stride != 1 lowercase_ : Optional[int] = max(1 , out_channels // config.groups_width ) 
lowercase_ : Dict = ( RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity() ) lowercase_ : List[Any] = nn.Sequential( RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , ) lowercase_ : int = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any ): lowercase_ : Any = hidden_state lowercase_ : Union[str, Any] = self.layer(lowercase_ ) lowercase_ : Union[str, Any] = self.shortcut(lowercase_ ) hidden_state += residual lowercase_ : str = self.activation(lowercase_ ) return hidden_state class __magic_name__ ( nn.Module): def __init__( self : Optional[Any] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ): super().__init__() lowercase_ : str = in_channels != out_channels or stride != 1 lowercase_ : int = max(1 , out_channels // config.groups_width ) lowercase_ : int = ( RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity() ) lowercase_ : Union[str, Any] = nn.Sequential( RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , ) lowercase_ : Optional[int] = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ): lowercase_ : Optional[int] = hidden_state lowercase_ : str = self.layer(lowercase_ ) lowercase_ : int = self.shortcut(lowercase_ ) hidden_state += residual lowercase_ : Optional[Any] = 
self.activation(lowercase_ ) return hidden_state class __magic_name__ ( nn.Module): def __init__( self : str , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ): super().__init__() lowercase_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer lowercase_ : str = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ): lowercase_ : Tuple = self.layers(lowercase_ ) return hidden_state class __magic_name__ ( nn.Module): def __init__( self : Dict , lowercase_ : RegNetConfig ): super().__init__() lowercase_ : Optional[Any] = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) lowercase_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ): self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ): lowercase_ : Tuple = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowercase_ : Union[str, Any] = hidden_states + (hidden_state,) lowercase_ : Dict = stage_module(lowercase_ ) if output_hidden_states: lowercase_ : Optional[Any] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return 
BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ ) class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = RegNetConfig UpperCamelCase__ = '''regnet''' UpperCamelCase__ = '''pixel_values''' UpperCamelCase__ = True def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] ): if isinstance(lowercase_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" ) elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any=False ): if isinstance(lowercase_ , lowercase_ ): lowercase_ : List[str] = value _lowercase : Dict = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" _lowercase : Any = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( '''The bare RegNet model outputting raw features without any specific head on top.''', _UpperCAmelCase, ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class __magic_name__ ( _UpperCAmelCase): def __init__( self : Any , lowercase_ : Any ): super().__init__(lowercase_ ) lowercase_ : List[str] = config lowercase_ : Union[str, Any] = RegNetEmbeddings(lowercase_ ) lowercase_ : Union[str, Any] = RegNetEncoder(lowercase_ ) lowercase_ : str = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ): lowercase_ : List[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ : str = self.embedder(lowercase_ ) lowercase_ : Optional[Any] = self.encoder( lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ ) lowercase_ : List[Any] = encoder_outputs[0] lowercase_ : str = self.pooler(lowercase_ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' RegNet Model with an image classification head on top (a linear layer on top of the pooled 
features), e.g. for ImageNet. ''', _UpperCAmelCase, ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class __magic_name__ ( _UpperCAmelCase): def __init__( self : Dict , lowercase_ : str ): super().__init__(lowercase_ ) lowercase_ : Any = config.num_labels lowercase_ : List[str] = RegNetModel(lowercase_ ) # classification head lowercase_ : Any = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ): lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ : Optional[int] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ ) lowercase_ : Optional[int] = outputs.pooler_output if return_dict else outputs[1] lowercase_ : List[Any] = self.classifier(lowercase_ ) lowercase_ : Optional[int] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowercase_ : Optional[int] = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowercase_ : str = """single_label_classification""" else: lowercase_ : str = """multi_label_classification""" if self.config.problem_type == "regression": lowercase_ : str = MSELoss() if self.num_labels == 1: lowercase_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowercase_ : List[str] = 
loss_fct(lowercase_ , lowercase_ ) elif self.config.problem_type == "single_label_classification": lowercase_ : Optional[int] = CrossEntropyLoss() lowercase_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowercase_ : Dict = BCEWithLogitsLoss() lowercase_ : Tuple = loss_fct(lowercase_ , lowercase_ ) if not return_dict: lowercase_ : Tuple = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
30
0
from __future__ import annotations

import os
from collections.abc import Mapping

# Canonical edge key type: an (a, b) vertex pair.
_lowercase : Optional[int] = tuple[int, int]

# Exported explicitly so star-imports pick up the underscore-prefixed class name.
__all__ = ["__magic_name__", "lowerCamelCase"]


class __magic_name__:
    """Undirected weighted graph with a Prim's-algorithm minimum-spanning-tree method."""

    def __init__(self, vertices, edges):
        """Build the graph from a vertex set and an ``{edge: weight}`` mapping.

        Every edge key is normalised to ``(min, max)`` so each undirected edge
        has exactly one canonical representation.
        """
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    # BUG FIX: methods were mangled to the same placeholder name, so the call
    # sites below (`add_edge`, `prims_algorithm`) raised AttributeError; the
    # `add_edge` body had also lost its assignment target.
    def add_edge(self, edge, weight):
        """Register *edge* (either orientation) with *weight*, adding endpoints as needed."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self):
        """Return a minimum spanning tree of this graph (Prim's algorithm).

        Starts from the smallest vertex and repeatedly adds the cheapest edge
        that crosses the cut between the growing subgraph and the rest.
        """
        subgraph = __magic_name__({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        while len(subgraph.vertices) < len(self.vertices):
            # Sentinel strictly larger than every real weight.
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                # An edge crosses the cut iff exactly one endpoint is inside.
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def lowerCamelCase(UpperCAmelCase__: str = "p107_network.txt") -> int:
    """Project Euler 107: total weight saved by replacing the network with its MST.

    Reads a comma-separated adjacency matrix ("-" marks a missing edge) from
    *UpperCAmelCase__* next to this script and returns ``initial - optimal`` weight.
    """
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, UpperCAmelCase__)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjaceny_matrix = [line.split(",") for line in data]
    # Only the strict lower triangle is needed: the matrix is symmetric.
    for edgea in range(1, len(adjaceny_matrix)):
        for edgeb in range(edgea):
            if adjaceny_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjaceny_matrix[edgea][edgeb])

    graph = __magic_name__(set(range(len(adjaceny_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"""{lowerCamelCase() = }""")
707
'''simple docstring'''

from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Map of submodule name -> public names it exports; consumed lazily below.
_lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch absent: the modeling symbols are simply not exported.
    pass
else:
    # NOTE(review): this rebinds `_lowercase`, yet `_LazyModule` below is passed
    # `_import_structure`, which is never defined here — looks like a mangled
    # rename of the usual `_import_structure["modeling_focalnet"] = [...]`;
    # verify against the upstream transformers `__init__`.
    _lowercase : int = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they are deferred.
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
'''simple docstring'''

import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


_lowercase : List[Any] = logging.get_logger(__name__)

# NOTE: this rebinds the module-level placeholder name; neither binding is read
# again in this file, so the clobbering is harmless but kept for compatibility.
_lowercase : Union[str, Any] = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}


# BUG FIX: the base class was the undefined name `__A` (PretrainedConfig is
# what this file imports), every `__init__` parameter was named `lowercase_`
# (duplicate argument names — a SyntaxError), and the body referenced the
# undefined `UpperCamelCase__`. Parameter names restored from the defaults
# visible in the original signature, matching upstream UniSpeechSatConfig.
class __magic_name__(PretrainedConfig):
    """Configuration class for a UniSpeech-SAT model.

    Holds the architecture hyper-parameters (encoder sizes, convolutional
    feature extractor, SpecAugment masking, codevector quantization, CTC loss
    and x-vector heads). Instantiating with no arguments yields the defaults
    of the base microsoft/unispeech-sat architecture.
    """

    UpperCamelCase__ = """unispeech-sat"""

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Copy to lists so the config owns mutable, JSON-serialisable copies.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.'''
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def SCREAMING_SNAKE_CASE_(self):
        # Overall stride of the feature extractor (product of conv strides);
        # named `inputs_to_logits_ratio` upstream — mangled name kept for callers.
        return functools.reduce(operator.mul, self.conv_stride, 1)
708
'''simple docstring'''

import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


# BUG FIX: all three parameters were named `UpperCAmelCase__` (duplicate
# argument names — a SyntaxError) and the body referenced the undefined names
# `monkeypatch`, `dataset_size` and `input_in_memory_max_size`. The parametrize
# decorators require those exact argument names, and `monkeypatch` is the
# standard pytest fixture.
@pytest.mark.parametrize("""dataset_size""", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("""input_in_memory_max_size""", ["""default""", 0, 100 * 2**20, 900 * 2**20])
def lowerCamelCase(dataset_size, input_in_memory_max_size, monkeypatch) -> None:
    """Check `is_small_dataset` against the IN_MEMORY_MAX_SIZE config knob.

    A dataset is "small" only when both sizes are truthy and the dataset is
    strictly below the configured in-memory ceiling.
    """
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, """IN_MEMORY_MAX_SIZE""", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        # Library default when nothing is configured.
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        # None/0 on either side disables the in-memory heuristic.
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
30
0
'''simple docstring'''

import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path

import pytest

import transformers
from transformers import (
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AutoTokenizer,
    BertConfig,
    BertTokenizer,
    BertTokenizerFast,
    CTRLTokenizer,
    GPTaTokenizer,
    GPTaTokenizerFast,
    PreTrainedTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
    TOKENIZER_MAPPING,
    get_tokenizer_config,
    tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
    DUMMY_DIFF_TOKENIZER_IDENTIFIER,
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tokenizers,
    slow,
)

# Make the shared test_module fixtures importable.
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402

if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class __magic_name__(unittest.TestCase):
    """Test suite for AutoTokenizer resolution, registration and remote-code loading.

    NOTE(review): this file has been identifier-mangled — every `lowerCamelCase_`
    argument below is an unresolved placeholder (originally model identifiers,
    classes, booleans, …), and all methods share one mangled name, so later
    definitions shadow earlier ones. The code is kept byte-identical; restore
    the placeholders against the upstream transformers test file before running.
    """

    def SCREAMING_SNAKE_CASE_(self, ):
        # presumably the original `setUp` counter reset — TODO confirm upstream.
        lowercase_ : List[str] = 0

    @slow
    def SCREAMING_SNAKE_CASE_(self, ):
        # Resolve every known BERT checkpoint (skipping Japanese variants) and
        # every GPT-2 checkpoint through AutoTokenizer.
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase_)
            self.assertIsNotNone(lowerCamelCase_)
            self.assertIsInstance(lowerCamelCase_, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(lowerCamelCase_), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            lowercase_ : str = AutoTokenizer.from_pretrained(lowerCamelCase_)
            self.assertIsNotNone(lowerCamelCase_)
            self.assertIsInstance(lowerCamelCase_, (GPTaTokenizer, GPTaTokenizerFast))
            self.assertGreater(len(lowerCamelCase_), 0)

    def SCREAMING_SNAKE_CASE_(self, ):
        # Tiny BERT fixture: 12-token vocabulary.
        lowercase_ : str = AutoTokenizer.from_pretrained(lowerCamelCase_)
        self.assertIsInstance(lowerCamelCase_, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def SCREAMING_SNAKE_CASE_(self, ):
        # Tiny RoBERTa fixture: 20-token vocabulary.
        lowercase_ : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase_)
        self.assertIsInstance(lowerCamelCase_, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def SCREAMING_SNAKE_CASE_(self, ):
        lowercase_ : Dict = AutoConfig.from_pretrained(lowerCamelCase_)
        self.assertIsInstance(lowerCamelCase_, lowerCamelCase_)
        # Check that tokenizer_type ≠ model_type
        lowercase_ : Optional[int] = AutoTokenizer.from_pretrained(lowerCamelCase_, config=lowerCamelCase_)
        self.assertIsInstance(lowerCamelCase_, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def SCREAMING_SNAKE_CASE_(self, ):
        # Load from a bare vocab directory by explicit tokenizer_type.
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.txt""", os.path.join(lowerCamelCase_, """vocab.txt"""))
            lowercase_ : Any = AutoTokenizer.from_pretrained(lowerCamelCase_, tokenizer_type="""bert""", use_fast=lowerCamelCase_)
            self.assertIsInstance(lowerCamelCase_, lowerCamelCase_)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.json""", os.path.join(lowerCamelCase_, """vocab.json"""))
            shutil.copy("""./tests/fixtures/merges.txt""", os.path.join(lowerCamelCase_, """merges.txt"""))
            lowercase_ : str = AutoTokenizer.from_pretrained(lowerCamelCase_, tokenizer_type="""gpt2""", use_fast=lowerCamelCase_)
            self.assertIsInstance(lowerCamelCase_, lowerCamelCase_)

    @require_tokenizers
    def SCREAMING_SNAKE_CASE_(self, ):
        # Same as above but with the default (fast) tokenizer classes.
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.txt""", os.path.join(lowerCamelCase_, """vocab.txt"""))
            lowercase_ : Tuple = AutoTokenizer.from_pretrained(lowerCamelCase_, tokenizer_type="""bert""")
            self.assertIsInstance(lowerCamelCase_, lowerCamelCase_)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.json""", os.path.join(lowerCamelCase_, """vocab.json"""))
            shutil.copy("""./tests/fixtures/merges.txt""", os.path.join(lowerCamelCase_, """merges.txt"""))
            lowercase_ : Dict = AutoTokenizer.from_pretrained(lowerCamelCase_, tokenizer_type="""gpt2""")
            self.assertIsInstance(lowerCamelCase_, lowerCamelCase_)

    def SCREAMING_SNAKE_CASE_(self, ):
        # Unknown tokenizer_type must raise.
        with pytest.raises(lowerCamelCase_):
            AutoTokenizer.from_pretrained("""./""", tokenizer_type="""xxx""")

    @require_tokenizers
    def SCREAMING_SNAKE_CASE_(self, ):
        # do_lower_case must round-trip through slow, fast and Auto classes.
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            lowercase_ : Dict = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""")
            self.assertIsInstance(lowerCamelCase_, (BertTokenizer, BertTokenizerFast))

            if isinstance(lowerCamelCase_, lowerCamelCase_):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, lowerCamelCase_)
            else:
                self.assertEqual(tokenizer.do_lower_case, lowerCamelCase_)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def SCREAMING_SNAKE_CASE_(self, ):
        # Nonexistent repo name must raise a descriptive error for every class.
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                lowerCamelCase_,
                """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""",
            ):
                lowercase_ : str = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""")

    def SCREAMING_SNAKE_CASE_(self, ):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        lowercase_ : Optional[Any] = TOKENIZER_MAPPING.values()
        lowercase_ : List[str] = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(lowerCamelCase_)

    @require_tokenizers
    def SCREAMING_SNAKE_CASE_(self, ):
        self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""", use_fast=lowerCamelCase_), lowerCamelCase_)
        self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased"""), lowerCamelCase_)

    @require_tokenizers
    def SCREAMING_SNAKE_CASE_(self, ):
        # do_lower_case=False keeps case, so "Hello" must fall to [UNK] in an
        # uncased vocab — for both the DistilBERT and MPNet tokenizers.
        lowercase_ : Optional[Any] = AutoTokenizer.from_pretrained("""distilbert-base-uncased""", do_lower_case=lowerCamelCase_)
        lowercase_ : Dict = """Hello, world. How are you?"""
        lowercase_ : Any = tokenizer.tokenize(lowerCamelCase_)
        self.assertEqual("""[UNK]""", tokens[0])

        lowercase_ : Any = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""", do_lower_case=lowerCamelCase_)
        lowercase_ : Tuple = tokenizer.tokenize(lowerCamelCase_)
        self.assertEqual("""[UNK]""", tokens[0])

    @require_tokenizers
    def SCREAMING_SNAKE_CASE_(self, ):
        lowercase_ : Any = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""")
        self.assertEqual(type(lowerCamelCase_), lowerCamelCase_)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, """[UNK]""")
        self.assertEqual(tokenizer.padding_side, """right""")
        self.assertEqual(tokenizer.truncation_side, """right""")

    def SCREAMING_SNAKE_CASE_(self, ):
        # save_pretrained / from_pretrained round trip preserves class and vocab.
        lowercase_ : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_)
        self.assertIsInstance(lowerCamelCase_, (BertTokenizer, BertTokenizerFast))

        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(lowerCamelCase_)
            lowercase_ : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_)

        self.assertIsInstance(lowerCamelCase_, tokenizer.__class__)
        self.assertEqual(tokenizera.vocab_size, 12)

    def SCREAMING_SNAKE_CASE_(self, ):
        lowercase_ : int = AutoTokenizer.from_pretrained("""ctrl""")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(lowerCamelCase_, lowerCamelCase_)

    def SCREAMING_SNAKE_CASE_(self, ):
        # Check we can load the tokenizer config of an online model.
        lowercase_ : Any = get_tokenizer_config("""bert-base-cased""")
        lowercase_ : Union[str, Any] = config.pop("""_commit_hash""", lowerCamelCase_)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(lowerCamelCase_, {"""do_lower_case""": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        lowercase_ : List[str] = get_tokenizer_config(lowerCamelCase_)
        self.assertDictEqual(lowerCamelCase_, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        lowercase_ : int = AutoTokenizer.from_pretrained(lowerCamelCase_)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(lowerCamelCase_)
            lowercase_ : int = get_tokenizer_config(lowerCamelCase_)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["""tokenizer_class"""], """BertTokenizer""")

    def SCREAMING_SNAKE_CASE_(self, ):
        # Custom (slow) tokenizer registration round trip; cleanup in finally
        # keeps the global registries pristine for other tests.
        try:
            AutoConfig.register("""custom""", lowerCamelCase_)
            AutoTokenizer.register(lowerCamelCase_, slow_tokenizer_class=lowerCamelCase_)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowerCamelCase_):
                AutoTokenizer.register(lowerCamelCase_, slow_tokenizer_class=lowerCamelCase_)

            lowercase_ : List[str] = CustomTokenizer.from_pretrained(lowerCamelCase_)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(lowerCamelCase_)
                lowercase_ : Tuple = AutoTokenizer.from_pretrained(lowerCamelCase_)
                self.assertIsInstance(lowerCamelCase_, lowerCamelCase_)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def SCREAMING_SNAKE_CASE_(self, ):
        # Custom fast tokenizer registration (two-step and one-step forms).
        try:
            AutoConfig.register("""custom""", lowerCamelCase_)

            # Can register in two steps
            AutoTokenizer.register(lowerCamelCase_, slow_tokenizer_class=lowerCamelCase_)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(lowerCamelCase_, fast_tokenizer_class=lowerCamelCase_)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                lowerCamelCase_, slow_tokenizer_class=lowerCamelCase_, fast_tokenizer_class=lowerCamelCase_)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowerCamelCase_):
                AutoTokenizer.register(lowerCamelCase_, fast_tokenizer_class=lowerCamelCase_)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                lowercase_ : int = BertTokenizerFast.from_pretrained(lowerCamelCase_)
                bert_tokenizer.save_pretrained(lowerCamelCase_)
                lowercase_ : Tuple = CustomTokenizerFast.from_pretrained(lowerCamelCase_)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(lowerCamelCase_)
                lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase_)
                self.assertIsInstance(lowerCamelCase_, lowerCamelCase_)
                lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase_, use_fast=lowerCamelCase_)
                self.assertIsInstance(lowerCamelCase_, lowerCamelCase_)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def SCREAMING_SNAKE_CASE_(self, ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(lowerCamelCase_):
            lowercase_ : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowerCamelCase_):
            lowercase_ : Dict = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""", trust_remote_code=lowerCamelCase_)

        lowercase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""", trust_remote_code=lowerCamelCase_)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(lowerCamelCase_)
            lowercase_ : str = AutoTokenizer.from_pretrained(lowerCamelCase_, trust_remote_code=lowerCamelCase_)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, """NewTokenizerFast""")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, """NewTokenizerFast""")

            # Test we can also load the slow version
            lowercase_ : int = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""", trust_remote_code=lowerCamelCase_, use_fast=lowerCamelCase_)
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, """NewTokenizer""")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(lowerCamelCase_)
                lowercase_ : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase_, trust_remote_code=lowerCamelCase_, use_fast=lowerCamelCase_)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, """NewTokenizer""")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, """NewTokenizer""")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, """NewTokenizer""")

    @require_tokenizers
    def SCREAMING_SNAKE_CASE_(self, ):
        # Local custom classes conflict with the Hub's remote code; the chosen
        # class depends on trust_remote_code.
        class __magic_name__(a__):
            UpperCamelCase__ = False

        class __magic_name__(a__):
            UpperCamelCase__ = NewTokenizer
            UpperCamelCase__ = False

        try:
            AutoConfig.register("""custom""", lowerCamelCase_)
            AutoTokenizer.register(lowerCamelCase_, slow_tokenizer_class=lowerCamelCase_)
            AutoTokenizer.register(lowerCamelCase_, fast_tokenizer_class=lowerCamelCase_)
            # If remote code is not set, the default is to use local
            lowercase_ : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""")
            self.assertEqual(tokenizer.__class__.__name__, """NewTokenizerFast""")
            self.assertFalse(tokenizer.special_attribute_present)
            lowercase_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""", use_fast=lowerCamelCase_)
            self.assertEqual(tokenizer.__class__.__name__, """NewTokenizer""")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            lowercase_ : Any = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""", trust_remote_code=lowerCamelCase_)
            self.assertEqual(tokenizer.__class__.__name__, """NewTokenizerFast""")
            self.assertFalse(tokenizer.special_attribute_present)
            lowercase_ : int = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""", trust_remote_code=lowerCamelCase_, use_fast=lowerCamelCase_)
            self.assertEqual(tokenizer.__class__.__name__, """NewTokenizer""")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            lowercase_ : List[str] = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""", trust_remote_code=lowerCamelCase_)
            self.assertEqual(tokenizer.__class__.__name__, """NewTokenizerFast""")
            self.assertTrue(tokenizer.special_attribute_present)
            lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""", trust_remote_code=lowerCamelCase_, use_fast=lowerCamelCase_)
            self.assertEqual(tokenizer.__class__.__name__, """NewTokenizer""")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def SCREAMING_SNAKE_CASE_(self, ):
        # Legacy (non-tokenizer.json) dynamic tokenizer repos still load.
        lowercase_ : Tuple = AutoTokenizer.from_pretrained(
            """hf-internal-testing/test_dynamic_tokenizer_legacy""", trust_remote_code=lowerCamelCase_)
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, """NewTokenizerFast""")

            # Test we can also load the slow version
            lowercase_ : Optional[Any] = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer_legacy""", trust_remote_code=lowerCamelCase_, use_fast=lowerCamelCase_)
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, """NewTokenizer""")
        else:
            self.assertEqual(tokenizer.__class__.__name__, """NewTokenizer""")

    def SCREAMING_SNAKE_CASE_(self, ):
        # Invalid (single-segment) repo id produces a descriptive error.
        with self.assertRaisesRegex(
            lowerCamelCase_, """bert-base is not a local folder and is not a valid model identifier"""):
            lowercase_ : Optional[Any] = AutoTokenizer.from_pretrained("""bert-base""")

    def SCREAMING_SNAKE_CASE_(self, ):
        # Unknown revision produces a descriptive error.
        with self.assertRaisesRegex(
            lowerCamelCase_, r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"""):
            lowercase_ : str = AutoTokenizer.from_pretrained(lowerCamelCase_, revision="""aaaaaa""")

    def SCREAMING_SNAKE_CASE_(self, ):
        # Make sure we have cached the tokenizer.
        lowercase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""")
        with RequestCounter() as counter:
            lowercase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
709
'''simple docstring'''

from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__(unittest.TestCase):
    """Slow integration test for the TF MT5 checkpoint google/mt5-small."""

    @slow
    def SCREAMING_SNAKE_CASE_(self):
        """Compare the model's mean LM log-likelihood against a recorded reference.

        BUG FIX: the body referenced `tokenizer`, `mtf_score` and
        `EXPECTED_SCORE` without ever binding them (everything had been
        mangled to the same `lowercase_` placeholder), so the test raised
        NameError; bindings restored from the surrounding call pattern.
        """
        model = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""")
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""")

        input_ids = tokenizer("""Hello there""", return_tensors="""tf""").input_ids
        labels = tokenizer("""Hi I am""", return_tensors="""tf""").input_ids

        loss = model(input_ids, labels=labels).loss
        # Mean negative loss = mean per-token log-likelihood.
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.22_81_68
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2E-4)
30
0
'''simple docstring'''


# BUG FIX: both parameters were named `UpperCAmelCase__` (duplicate argument
# names — a SyntaxError) and the body read the unbound name `word` before the
# loop defined it; parameter names and internal references restored.
def lowerCamelCase(word: str, max_width: int) -> list:
    """Fully justify *word* (a sentence) into lines of exactly *max_width* chars.

    Greedy line packing; every line except the last distributes the leftover
    spaces between words round-robin from the left, and the last line is
    left-justified and right-padded.

    >>> lowerCamelCase("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        # Spaces still to distribute beyond the words themselves.
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # If there is only one word in the line,
            # just pad the remainder of the line with spaces.
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : how many spaces to insert
            # after word line[i].
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # Distribute the remainder round-robin to the leftmost gaps.
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for w in words:
        if width + len(w) + len(line) <= max_width:
            # keep adding words until we can fill out max_width;
            # width = sum of word lengths, len(line) = minimum spaces needed
            line.append(w)
            width += len(w)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [w], len(w)
    # Last line: left-justified, padded to max_width.
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
710
'''simple docstring'''

from collections.abc import Callable

import numpy as np


# BUG FIX: all five parameters were named `UpperCAmelCase__` (duplicate
# argument names — a SyntaxError) and the body referenced the unbound names
# `ya`/`xa`; parameter names restored to match the body's usage.
def lowerCamelCase(
    ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float
) -> np.array:
    """Solve y' = ode_func(x, y) with Heun's method (modified Euler).

    Integrates from ``xa`` to ``x_end`` with initial value ``ya`` and a fixed
    ``step_size``; returns the array of successive y values (length n+1).
    """
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa

    for k in range(n):
        # Predictor: plain Euler step.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: trapezoidal average of the slopes at both ends.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
30
0
"""Doubly linked list with an explicit iterator class."""


class Node:
    """A node of a doubly linked list, holding a value and both neighbor links."""

    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self):
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    """Forward iterator over a chain of ``Node`` objects."""

    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        value = self.current.get_data()
        self.current = self.current.get_next()
        return value


class LinkedList:
    """Doubly linked list.

    NOTE(review): the obfuscated original gave all three classes one name and
    every method the same name (so later defs shadowed earlier ones); method
    names below are reconstructed from the call sites visible in the bodies
    (``get_data``, ``insert_before_node``, ``set_head`` ...).
    """

    def __init__(self):
        self.head = None  # first node in list
        self.tail = None  # last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            # Inserting before the head moves the head pointer.
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            # Inserting after the tail moves the tail pointer.
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position: int, value) -> None:
        # Positions are 1-based; falling off the end appends at the tail.
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        # Splice the node out, then clear its own links.
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.previous = None
        node.next = None

    def is_empty(self) -> bool:
        return self.head is None


def lowerCamelCase() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
711
"""Biquad IIR filter constructors (RBJ Audio EQ Cookbook formulas).

NOTE(review): the obfuscated original named all seven factory functions
identically (so only the last survived) and repeated parameter names inside
each signature (SyntaxError). Names below follow the standard make_* scheme;
coefficient formulas are taken verbatim from the original bodies.
"""
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass filter at ``frequency`` Hz."""
    w0 = tau * frequency / samplerate  # normalized angular frequency
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass filter at ``frequency`` Hz."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass filter centered at ``frequency`` Hz."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass filter (unit gain, phase shift only)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # For an all-pass, numerator and denominator mirror each other.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a peaking EQ filter boosting/cutting ``gain_db`` at ``frequency``."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from dB gain

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a low-shelf filter boosting/cutting ``gain_db`` below ``frequency``."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a high-shelf filter boosting/cutting ``gain_db`` above ``frequency``."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
30
0
"""Image processor implementing the resize -> center-crop -> rescale -> normalize pipeline."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_batched,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class __magic_name__(BaseImageProcessor):
    """Preprocesses images: optional resize, center crop, rescale and normalize.

    NOTE(review): the obfuscated original repeated parameter names in every
    signature (SyntaxError) and gave all methods one name; parameter and
    method names below are reconstructed from the call sites in ``preprocess``
    (``self.resize``, ``self.center_crop``, ``self.rescale``, ``self.normalize``).
    The original class name marker is kept as-is.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Optional[Dict[str, int]] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``size`` (either ``shortest_edge`` or explicit height/width)."""
        size = get_size_dict(size)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(
                image, size=size["shortest_edge"], default_to_square=False
            )
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop ``image`` to ``size['height']`` x ``size['width']``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` channel-wise with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the full pipeline to one image or a batch and return pixel values."""
        # Per-call arguments override the processor's configured defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size_dict = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size_dict, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
712
"""Hugging Face `datasets` builder that materializes a Spark DataFrame as a dataset."""
import os
import posixpath
import shutil  # FIX: used by the shard-move logic below but missing from the original imports
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """Builder config for datasets created from a Spark DataFrame."""

    # Optional explicit feature schema; None means infer from the data.
    features: Optional[datasets.Features] = None


def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    """Return a generator factory yielding (key, example-dict) pairs partition by partition."""
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                # Key combines partition and row index so keys are globally unique.
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    """Examples iterable over a Spark DataFrame, shardable by partition."""

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        """Shuffle the partition visiting order (not the rows within partitions)."""
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        """Keep only the partitions assigned to this worker."""
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)


class Spark(datasets.DatasetBuilder):
    """Dataset builder that writes a Spark DataFrame to Arrow/Parquet shards."""

    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        # semanticHash ties the cache entry to the DataFrame's logical plan.
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        """On multi-node clusters, verify workers can reach the driver's cache_dir."""

        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        """Repartition so the average partition stays under ``max_shard_size`` bytes."""
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        """Write shards on the executors; yield per-task (examples, bytes, shards, lengths) stats."""
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    # Current shard is full: finalize it and roll over to a new one.
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                # Move finished shards from the executor-local working dir to the target dir.
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(os.path.join(os.path.dirname(working_fpath), file), dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        """Drive the distributed write, then rename shards to their final names."""
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        # NOTE(review): targets reconstructed from the standard split_info contract;
        # the original assignment names were mangled by obfuscation.
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda a: _rename_shard(*a)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
30
0
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    """Register the custom markers used across the transformers test suite."""
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    """Forward option registration to the shared transformers helper."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Emit the extra report files when --make-reports is set."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # Exit code 5 means "no tests collected"; treat that as success in CI.
    # NOTE(review): assignment target reconstructed; the original name was mangled.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    """Output checker that accepts any output when the IGNORE_RESULT flag is set."""

    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


# NOTE(review): patch targets reconstructed from the standard transformers
# conftest; the original assignment names were mangled by obfuscation.
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
713
"""Lazy-import module structure for the Bloom model family.

Fixes: every assignment was mangled to ``_lowercase`` while the final
``_LazyModule(...)`` call references ``_import_structure`` (NameError), and
the lazy module object was never installed in ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps submodule name -> names exported from it; consumed by _LazyModule below.
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast tokenizer is only exposed when the `tokenizers` backend is installed.
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes require the PyTorch backend.
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
from __future__ import annotations

"""Minimal dense linear algebra: a Vector and a Matrix class plus helpers.

Reconstructed so that the names the code itself references (``Vector``,
``Matrix``, ``zero_vector``) actually exist; the original defined both classes
under one mangled name and never defined the helper it called.
"""
import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    """A vector of real numbers backed by a plain list of components."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        # Default to the empty vector; copy the input so callers can't mutate us.
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            return Vector([self.__components[i] + other.component(i) for i in range(size)])
        raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            return Vector([self.__components[i] - other.component(i) for i in range(size)])
        raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        # Scalar product -> Vector; dot product with another Vector -> float.
        if isinstance(other, (float, int)):
            return Vector([c * other for c in self.__components])
        if isinstance(other, Vector) and len(self) == len(other):
            return sum(self.__components[i] * other.component(i) for i in range(len(self)))
        raise Exception("invalid operand!")

    def copy(self) -> Vector:
        """Return an independent copy of this vector."""
        return Vector(self.__components)

    def component(self, i: int) -> float:
        """Return the i-th component (negative indices allowed, list-style)."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        """Set component *pos* to *value* in place."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        """Return the 2-norm; raises on the empty vector."""
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        return math.sqrt(sum(c**2 for c in self.__components))

    def angle(self, other: Vector, deg: bool = False) -> float:
        """Return the angle to *other*, in radians (or degrees if deg=True)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    """Return the zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the standard basis vector with a 1 at index *pos*."""
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Return scalar * x + y (the classic BLAS axpy operation)."""
    assert isinstance(x, Vector) and isinstance(y, Vector) and isinstance(scalar, (int, float))
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Return a vector of n random integers in [a, b], seeded with n."""
    random.seed(n)
    return Vector([random.randint(a, b) for _ in range(n)])


class Matrix:
    """A dense matrix with explicit width (columns) and height (rows)."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            rows = []
            for i in range(self.__height):
                rows.append([self.__matrix[i][j] + other.component(i, j) for j in range(self.__width)])
            return Matrix(rows, self.__width, self.__height)
        raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            rows = []
            for i in range(self.__height):
                rows.append([self.__matrix[i][j] - other.component(i, j) for j in range(self.__width)])
            return Matrix(rows, self.__width, self.__height)
        raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector product
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [self.__matrix[i][j] * other.component(j) for j in range(self.__width)]
                    ans.change_component(i, sum(prods))
                return ans
            raise Exception(
                "vector must have the same size as the "
                "number of columns of the matrix!"
            )
        elif isinstance(other, (int, float)):  # matrix-scalar product
            rows = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(rows, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        """Return entry (x, y); raises on out-of-range indices."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        """Set entry (x, y) to *value* in place."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        """Determinant of the submatrix with row x and column y removed."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        """Signed minor: (-1)^(x+y) * minor(x, y)."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        """Determinant by Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    """Return the n x n zero matrix."""
    rows = [[0] * n for _ in range(n)]
    return Matrix(rows, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    # NOTE(review): the source seeds the RNG with its first argument — confirm
    # the seed was meant to be `width` rather than a dedicated seed parameter.
    random.seed(width)
    rows = [[random.randint(a, b) for _ in range(width)] for _ in range(height)]
    return Matrix(rows, width, height)
714
"""Vigenere cipher: interactive driver plus encrypt/decrypt helpers."""

# Alphabet used for both the message letters and the key letters.
# Must be named LETTERS: translate_message() reads it by that name
# (the original bound it to a throwaway name, a NameError at runtime).
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    """Prompt for a message, key, and mode, then print the result."""
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    """Encrypt *message* with *key* using the Vigenere cipher."""
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    """Decrypt *message* with *key* using the Vigenere cipher."""
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    """Shift each letter of *message* by the matching key letter.

    Letter case is preserved; non-letters pass through unchanged and do
    not advance the key index.
    """
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            # Advance the key, wrapping around at its end.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
30
0
"""Dynamic-programming check that string *a* can be abbreviated to *b*."""


def lowerCamelCase(a: str, b: str) -> bool:
    """Return True if *a* can be turned into *b* by capitalizing some of its
    lowercase letters and deleting the remaining lowercase letters.

    dp[i][j] means: the first i characters of *a* can produce the first j
    characters of *b*.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True  # empty produces empty
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Capitalize a[i] to match b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Delete a[i] — only lowercase letters may be deleted.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
715
"""Tests for BarkProcessor: save/load round-trips, voice presets, tokenizer parity."""
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class __magic_name__(unittest.TestCase):
    def setUp(self):
        # Fixtures must live on `self`: every test below reads them.
        # (The original assigned them to locals, so the tests could never run.)
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        """Load the checkpoint tokenizer, forwarding any tokenizer kwargs."""
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        # NOTE(review): kwarg values reconstructed from the upstream Bark
        # processor test; the mangled source erased them — confirm against it.
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
30
0
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    """Return True if *graph* (an adjacency list keyed 0..n-1) is bipartite.

    Two-colors every connected component with DFS, then verifies that no
    edge joins two vertices of the same color.
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        # Color vertex v with c, neighbors with the opposite color 1 - c.
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Color every component (the graph may be disconnected).
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # An edge inside one color class means the graph is not bipartite.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
716
"""Task template aligning a dataset's features with the image-classification schema."""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    """Casts a dataset to `{"image": Image, "labels": ClassLabel}`."""

    # `task` stays in asdict() output even when left at its default.
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the
        dataset's own ClassLabel feature (names/num_classes included)."""
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Frozen dataclass: write through __dict__ to bypass the frozen guard.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map dataset column names to the canonical schema names."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
30
0
"""Lazy import machinery for the MobileViT model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# Maps submodule name -> list of public names it provides. Must be named
# `_import_structure`: it is passed to `_LazyModule` below (the original bound
# it — and each backend list — to one throwaway name, leaving this undefined).
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Image processing requires the vision backend (PIL).
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
717
"""Download class/regularization images from the LAION knn service for DreamBooth."""
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    """Fetch ~num_class_images images matching *class_prompt* into *class_data_dir*.

    Grows the knn query size by 1.5x until enough candidates are returned
    (capped at 10k), then downloads until num_class_images files are saved.
    Also writes caption.txt, urls.txt and images.txt alongside the images.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    # Already populated from an earlier run — nothing to do.
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fb, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fc:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Validate the payload really is an image before saving.
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fb.write(images["url"] + "\n")
                    fc.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip any URL that fails for any reason.
                continue
    return


def parse_args():
    """Parse --class_prompt, --class_data_dir and --num_class_images."""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
30
0
"""VisualBERT model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    )
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}


class VisualBertConfig(PretrainedConfig):
    """Configuration for a VisualBERT model.

    Parameter names were reconstructed from the attribute assignments in the
    body (the mangled source gave every parameter the same name, a
    SyntaxError). Defaults kept exactly as in the source, in source order.
    """

    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
718
from __future__ import annotations

"""In-place slowsort (a deliberately inefficient multiply-and-surrender sort)."""


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort sequence[start..end] (inclusive) in place.

    Defaults cover the whole list. Recursively sorts both halves, moves the
    larger of the two half-maxima to `end`, then sorts everything before it.
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    # Swap IN the list — the original assigned both values to throwaway
    # locals, so the sequence was never modified.
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
30
0
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor _lowercase : List[str] = random.Random() def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int]=1.0 , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Optional[Any]=None ) -> int: if rng is None: lowercase_ : Optional[Any] = global_rng lowercase_ : List[str] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __magic_name__ ( unittest.TestCase): def __init__( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Dict=7 , lowercase_ : Dict=400 , lowercase_ : Any=2000 , lowercase_ : List[Any]=24 , lowercase_ : Union[str, Any]=24 , lowercase_ : str=0.0 , lowercase_ : Dict=16000 , lowercase_ : Optional[int]=True , lowercase_ : Dict=True , ): lowercase_ : Union[str, Any] = parent lowercase_ : int = batch_size lowercase_ : int = min_seq_length lowercase_ : str = max_seq_length lowercase_ : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowercase_ : List[Any] = feature_size lowercase_ : List[Any] = num_mel_bins lowercase_ : Optional[int] = padding_value lowercase_ : Union[str, Any] = sampling_rate lowercase_ : str = return_attention_mask lowercase_ : Union[str, Any] = do_normalize def SCREAMING_SNAKE_CASE_ ( self : Any ): return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def SCREAMING_SNAKE_CASE_ ( 
self : Union[str, Any] , lowercase_ : Tuple=False , lowercase_ : Optional[int]=False ): def _flatten(lowercase_ : Optional[int] ): return list(itertools.chain(*lowercase_ ) ) if equal_length: lowercase_ : Any = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size lowercase_ : List[str] = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: lowercase_ : List[str] = [np.asarray(lowercase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __magic_name__ ( lowercase_, unittest.TestCase): UpperCamelCase__ = SpeechaTextFeatureExtractor if is_speech_available() else None def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Any = SpeechaTextFeatureExtractionTester(self ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Dict ): self.assertTrue(np.all(np.mean(lowercase_ , axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowercase_ , axis=0 ) - 1 ) < 1E-3 ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): # Tests that all call wrap to encode_plus and batch_encode_plus lowercase_ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowercase_ : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase_ : Dict = [np.asarray(lowercase_ ) for speech_input in speech_inputs] # Test feature size lowercase_ : Any = feature_extractor(lowercase_ , padding=lowercase_ , return_tensors="""np""" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input lowercase_ : str = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features lowercase_ : Any = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" 
).input_features self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) ) # Test batched lowercase_ : int = feature_extractor(lowercase_ , return_tensors="""np""" ).input_features lowercase_ : Optional[Any] = feature_extractor(lowercase_ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ): self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. lowercase_ : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)] lowercase_ : int = np.asarray(lowercase_ ) lowercase_ : Tuple = feature_extractor(lowercase_ , return_tensors="""np""" ).input_features lowercase_ : int = feature_extractor(lowercase_ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ): self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase_ : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase_ : List[str] = ["""longest""", """max_length""", """do_not_pad"""] lowercase_ : List[Any] = [None, 16, None] for max_length, padding in zip(lowercase_ , lowercase_ ): lowercase_ : Any = feature_extractor( lowercase_ , padding=lowercase_ , max_length=lowercase_ , return_attention_mask=lowercase_ ) lowercase_ : Any = inputs.input_features lowercase_ : str = inputs.attention_mask lowercase_ : Any = [np.sum(lowercase_ ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase_ : 
Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase_ : Dict = ["""longest""", """max_length""", """do_not_pad"""] lowercase_ : Optional[Any] = [None, 16, None] for max_length, padding in zip(lowercase_ , lowercase_ ): lowercase_ : Tuple = feature_extractor( lowercase_ , max_length=lowercase_ , padding=lowercase_ , return_tensors="""np""" , return_attention_mask=lowercase_ ) lowercase_ : Optional[Any] = inputs.input_features lowercase_ : Optional[Any] = inputs.attention_mask lowercase_ : Union[str, Any] = [np.sum(lowercase_ ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase_ : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase_ : Any = feature_extractor( lowercase_ , padding="""max_length""" , max_length=4 , truncation=lowercase_ , return_tensors="""np""" , return_attention_mask=lowercase_ , ) lowercase_ : Dict = inputs.input_features lowercase_ : Union[str, Any] = inputs.attention_mask lowercase_ : Union[str, Any] = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase_ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase_ 
: Optional[int] = feature_extractor( lowercase_ , padding="""longest""" , max_length=4 , truncation=lowercase_ , return_tensors="""np""" , return_attention_mask=lowercase_ , ) lowercase_ : Dict = inputs.input_features lowercase_ : Tuple = inputs.attention_mask lowercase_ : str = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 24) ) lowercase_ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase_ : Union[str, Any] = feature_extractor( lowercase_ , padding="""longest""" , max_length=16 , truncation=lowercase_ , return_tensors="""np""" , return_attention_mask=lowercase_ , ) lowercase_ : List[Any] = inputs.input_features lowercase_ : List[Any] = inputs.attention_mask lowercase_ : Union[str, Any] = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 24) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): import torch lowercase_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase_ : int = np.random.rand(100 , 32 ).astype(np.floataa ) lowercase_ : Optional[int] = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: lowercase_ : List[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) lowercase_ : Union[str, Any] = 
feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] ): from datasets import load_dataset lowercase_ : Dict = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech lowercase_ : Optional[int] = ds.sort("""id""" ).select(range(lowercase_ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): # fmt: off lowercase_ : Optional[int] = np.array([ -1.57_45, -1.77_13, -1.70_20, -1.60_69, -1.22_50, -1.11_05, -0.90_72, -0.82_41, -1.23_10, -0.80_98, -0.33_20, -0.41_01, -0.79_85, -0.49_96, -0.82_13, -0.91_28, -1.04_20, -1.12_86, -1.04_40, -0.79_99, -0.84_05, -1.22_75, -1.54_43, -1.46_25, ] ) # fmt: on lowercase_ : List[str] = self._load_datasamples(1 ) lowercase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase_ : Union[str, Any] = feature_extractor(lowercase_ , return_tensors="""pt""" ).input_features self.assertEquals(input_features.shape , (1, 584, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30] , lowercase_ , atol=1E-4 ) )
719
"""Run Stable Diffusion on CPU with Intel Extension for PyTorch (IPEX) optimizations.

The pipeline (UNet, VAE, text encoder and optional safety checker) is converted to
channels-last memory format and optimized with ``ipex.optimize`` in bfloat16, then a
single image is generated under CPU autocast.
"""
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    # Optionally swap in the faster DPM-Solver++ multistep scheduler.
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last — preferred layout for IPEX CPU kernels
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex: a representative sample input lets IPEX trace/fuse the UNet.
# Shapes: latents (2, 4, 64, 64), a scalar timestep in [0, 999), CLIP hidden states (2, 77, 768).
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    # Fall back to optimization without the sample input if tracing fails.
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute — fixed seed for reproducible output
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
30
0
"""Tests for ``AutoImageProcessor``: loading from the Hub, from local files/configs,
error reporting, dynamic (remote-code) processors, and custom-processor registration."""
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    CLIPConfig,
    CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


# NOTE(review): the obfuscated original named every method `SCREAMING_SNAKE_CASE_`,
# so later defs shadowed earlier ones and nothing matched unittest's `test_*`
# discovery pattern. Method names and local variables restored below.
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        # Never block on the "trust remote code?" prompt in tests.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # The legacy `feature_extractor_type` key must still resolve to an image processor.
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not trusted, loading a dynamic processor must fail.
        with self.assertRaises(ValueError):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            _ = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        # Name must be "NewImageProcessor" — the assertions below compare class names
        # between this local processor and the remote one on the Hub.
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True  # marker distinguishing the local class from the Hub one

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
720
"""Lazy-loading package ``__init__`` for the SwiftFormer model.

Only the configuration is imported eagerly; the torch-backed modeling module is
registered for lazy import when torch is available.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Map of submodule name -> public names it exports. The final _LazyModule call
# below consumes this dict, so it must be named `_import_structure` (the
# previous `_lowercase` binding left `_import_structure` undefined).
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: the modeling module is simply not exported.
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
"""Prune the attention heads of a GPT-2 LM by head-importance scores (Michel et al.,
http://arxiv.org/abs/1905.10650): compute per-head entropy/importance, iteratively
mask the least important heads, then actually prune them and measure the speedup.

NOTE(review): the obfuscated original named every function `lowerCamelCase` (so defs
shadowed each other while call sites referenced the real names) and duplicated the
parameter name `UpperCAmelCase__` (a SyntaxError). Names restored throughout.
"""
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    """Save `model` into `dirpath`, replacing any previous checkpoint files there."""
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last dim.

    If `unlogit` is True, `p` is first squared (attention scores -> probabilities).
    Zero probabilities contribute 0 (avoids NaN from 0 * log(0)).
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Log a 2D tensor row by row (one line per layer)."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores over `eval_dataloader`.

    Importance is the accumulated |gradient| of the loss w.r.t. the head mask.
    Returns (attn_entropy, head_importance, total_loss).
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization (L2 norm per layer)
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    # Rank 0 = most important head.
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least important heads until the LM score drops below
    `masking_threshold` * original score. Returns the final head mask."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Physically remove the heads zeroed in `head_mask`, then compare score and
    timing before/after pruning and save the pruned model."""
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # squeeze() collapsed a single-head list to a scalar — rewrap it.
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    """Parse CLI args, load the GPT-2 LM and dataset, then score/mask/prune heads."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset: one token id per line in the data file.
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
721
'''simple docstring''' import unittest import numpy as np def lowerCamelCase ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray | None = None , ) -> np.ndarray: lowercase_ : List[Any] = np.shape(UpperCAmelCase__ ) lowercase_ : Dict = np.shape(UpperCAmelCase__ ) lowercase_ : int = np.shape(UpperCAmelCase__ ) if shape_a[0] != shape_b[0]: lowercase_ : Optional[int] = ( """Expected the same number of rows for A and B. """ F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(UpperCAmelCase__ ) if shape_b[1] != shape_c[1]: lowercase_ : Optional[Any] = ( """Expected the same number of columns for B and C. """ F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(UpperCAmelCase__ ) lowercase_ : Any = pseudo_inv if a_inv is None: try: lowercase_ : List[str] = np.linalg.inv(UpperCAmelCase__ ) except np.linalg.LinAlgError: raise ValueError( """Input matrix A is not invertible. 
Cannot compute Schur complement.""" ) return mat_c - mat_b.T @ a_inv @ mat_b class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Dict = np.array([[2, 1], [6, 3]] ) lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ ) lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] ) lowercase_ : Optional[int] = np.linalg.det(lowercase_ ) lowercase_ : int = np.linalg.det(lowercase_ ) lowercase_ : int = np.linalg.det(lowercase_ ) self.assertAlmostEqual(lowercase_ , det_a * det_s ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(lowercase_ ): schur_complement(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(lowercase_ ): schur_complement(lowercase_ , lowercase_ , lowercase_ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
30
0
'''simple docstring''' import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = ( '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.''' '''It takes two arguments named `image` which should be the original image, and `label` which should be a text ''' '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.''' ) UpperCamelCase__ = '''CIDAS/clipseg-rd64-refined''' UpperCamelCase__ = '''image_segmenter''' UpperCamelCase__ = CLIPSegForImageSegmentation UpperCamelCase__ = ['''image''', '''text'''] UpperCamelCase__ = ['''image'''] def __init__( self : Any , *lowercase_ : Optional[int] , **lowercase_ : int ): requires_backends(self , ["""vision"""] ) super().__init__(*lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : "Image" , lowercase_ : str ): return self.pre_processor(text=[label] , images=[image] , padding=lowercase_ , return_tensors="""pt""" ) def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Dict ): with torch.no_grad(): lowercase_ : Optional[Any] = self.model(**lowercase_ ).logits return logits def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : List[str] ): lowercase_ : str = outputs.cpu().detach().numpy() lowercase_ : Optional[int] = 0 lowercase_ : Optional[Any] = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
700
'''simple docstring''' _lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" def lowerCamelCase ( UpperCAmelCase__ : bytes ) -> bytes: # Make sure the supplied data is a bytes-like object if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ : Union[str, Any] = F'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(UpperCAmelCase__ ) lowercase_ : Dict = """""".join(bin(UpperCAmelCase__ )[2:].zfill(8 ) for byte in data ) lowercase_ : Union[str, Any] = len(UpperCAmelCase__ ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase_ : List[Any] = b"""=""" * ((6 - len(UpperCAmelCase__ ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(UpperCAmelCase__ ) % 6) else: lowercase_ : Union[str, Any] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(UpperCAmelCase__ ) , 6 ) ).encode() + padding ) def lowerCamelCase ( UpperCAmelCase__ : str ) -> bytes: # Make sure encoded_data is either a string or a bytes-like object if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ : List[str] = ( """argument should be a bytes-like object or ASCII string, """ F'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(UpperCAmelCase__ ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): try: lowercase_ : Optional[int] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) lowercase_ : Any = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 
characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(UpperCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase_ : Optional[int] = encoded_data[:-padding] lowercase_ : Any = """""".join( bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase_ : int = """""".join( bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data ) lowercase_ : Optional[int] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(UpperCAmelCase__ ) , 8 ) ] return bytes(UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
30
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase : Any = { "configuration_clap": [ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapAudioConfig", "ClapConfig", "ClapTextConfig", ], "processing_clap": ["ClapProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[Any] = [ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapModel", "ClapPreTrainedModel", "ClapTextModel", "ClapTextModelWithProjection", "ClapAudioModel", "ClapAudioModelWithProjection", ] _lowercase : Dict = ["ClapFeatureExtractor"] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys _lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
701
'''simple docstring''' import argparse _lowercase : Optional[int] = "docs/source/_static/js/custom.js" def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Dict: with open(UpperCAmelCase__ , encoding="""utf-8""" , newline="""\n""" ) as f: lowercase_ : Optional[int] = f.readlines() lowercase_ : Tuple = 0 # First let's put the right version while not lines[index].startswith("""const stableVersion =""" ): index += 1 lowercase_ : Optional[Any] = F'''const stableVersion = "v{version}"\n''' # Then update the dictionary while not lines[index].startswith("""const versionMapping = {""" ): index += 1 # We go until the end while not lines[index].startswith("""}""" ): index += 1 # We add the new version at the end lines[index - 1] += F''' "v{version}": "v{version}",\n''' with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(UpperCAmelCase__ ) if __name__ == "__main__": _lowercase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--version", help="Release version.") _lowercase : Dict = parser.parse_args() update_custom_js(args.version)
30
0
'''simple docstring''' import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList _lowercase : str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"] class __magic_name__ ( _UpperCAmelCase): '''simple docstring''' def __init__( self : Optional[int] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Any=None , lowercase_ : Union[str, Any]=1 ): lowercase_ : List[str] = tokenizer lowercase_ : Optional[Any] = dataset lowercase_ : int = len(lowercase_ ) if n_tasks is None else n_tasks lowercase_ : Optional[int] = n_copies def __iter__( self : List[str] ): lowercase_ : Optional[int] = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... 
prompts.append(self.tokenizer.eos_token + self.dataset[task]["""prompt"""].strip() ) lowercase_ : str = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors="""pt""" ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class __magic_name__ ( _UpperCAmelCase): '''simple docstring''' def __init__( self : Any , lowercase_ : List[str] , lowercase_ : str , lowercase_ : List[Any] ): lowercase_ : List[str] = start_length lowercase_ : Optional[Any] = eof_strings lowercase_ : str = tokenizer def __call__( self : Optional[int] , lowercase_ : str , lowercase_ : List[Any] , **lowercase_ : List[str] ): lowercase_ : Dict = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) lowercase_ : Optional[int] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(lowercase_ ) def lowerCamelCase ( UpperCAmelCase__ : str ) -> List[str]: lowercase_ : Optional[Any] = re.split("""(%s)""" % """|""".join(UpperCAmelCase__ ) , UpperCAmelCase__ ) # last string should be "" return "".join(string_list[:-2] ) def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str=20 , **UpperCAmelCase__ : List[Any] ) -> Dict: lowercase_ : str = defaultdict(UpperCAmelCase__ ) # dict of list of generated tokens for step, batch in tqdm(enumerate(UpperCAmelCase__ ) ): with torch.no_grad(): lowercase_ : str = batch["""ids"""].shape[-1] lowercase_ : int = accelerator.unwrap_model(UpperCAmelCase__ ).generate( input_ids=batch["""ids"""][:, : batch["""input_len"""]] , num_return_sequences=UpperCAmelCase__ , **UpperCAmelCase__ ) # each task is generated batch_size times lowercase_ : Tuple = batch["""task_id"""].repeat(UpperCAmelCase__ ) lowercase_ 
: str = accelerator.pad_across_processes( UpperCAmelCase__ , dim=1 , pad_index=tokenizer.pad_token_id ) lowercase_ : Optional[int] = accelerator.gather((generated_tokens, generated_tasks) ) lowercase_ : Optional[Any] = generated_tokens.cpu().numpy() lowercase_ : List[Any] = generated_tasks.cpu().numpy() for task, generated_tokens in zip(UpperCAmelCase__ , UpperCAmelCase__ ): gen_token_dict[task].append(UpperCAmelCase__ ) lowercase_ : str = [[] for _ in range(UpperCAmelCase__ )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: lowercase_ : List[Any] = tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ ) code_gens[task].append(remove_last_block(UpperCAmelCase__ ) ) return code_gens def lowerCamelCase ( ) -> List[str]: # Setup configuration lowercase_ : str = HfArgumentParser(UpperCAmelCase__ ) lowercase_ : int = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric lowercase_ : Any = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing lowercase_ : Optional[Any] = """false""" if args.num_workers is None: lowercase_ : Any = multiprocessing.cpu_count() # Use dataset load to feed to accelerate lowercase_ : Optional[Any] = Accelerator() set_seed(args.seed , device_specific=UpperCAmelCase__ ) # Load model and tokenizer lowercase_ : str = AutoTokenizer.from_pretrained(args.model_ckpt ) lowercase_ : str = tokenizer.eos_token lowercase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings lowercase_ : Union[str, Any] = { """do_sample""": args.do_sample, """temperature""": args.temperature, """max_new_tokens""": args.max_new_tokens, """top_p""": args.top_p, """top_k""": args.top_k, """stopping_criteria""": StoppingCriteriaList([EndOfFunctionCriteria(0 , UpperCAmelCase__ , UpperCAmelCase__ )] ), } # Load evaluation dataset and metric lowercase_ : 
Optional[int] = load_dataset("""openai_humaneval""" ) lowercase_ : Optional[int] = load_metric("""code_eval""" ) lowercase_ : Dict = args.num_tasks if args.num_tasks is not None else len(human_eval["""test"""] ) lowercase_ : Union[str, Any] = args.n_samples // args.batch_size lowercase_ : int = TokenizedDataset(UpperCAmelCase__ , human_eval["""test"""] , n_copies=UpperCAmelCase__ , n_tasks=UpperCAmelCase__ ) # do not confuse args.batch_size, which is actually the num_return_sequences lowercase_ : Tuple = DataLoader(UpperCAmelCase__ , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: lowercase_ : Dict = code_eval_metric.compute(references=[""""""] , predictions=[[""""""]] ) except ValueError as exception: print( """Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`""" """ flag to enable code evaluation.""" ) raise exception lowercase_ : Union[str, Any] = accelerator.prepare(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : Optional[int] = complete_code( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , n_tasks=UpperCAmelCase__ , batch_size=args.batch_size , **UpperCAmelCase__ , ) if accelerator.is_main_process: lowercase_ : List[Any] = [] for task in tqdm(range(UpperCAmelCase__ ) ): lowercase_ : str = human_eval["""test"""][task]["""test"""] lowercase_ : Dict = F'''check({human_eval['test'][task]['entry_point']})''' references.append("""\n""" + test_func + """\n""" + entry_point ) # Evaluate completions with "code_eval" metric lowercase_ : str = code_eval_metric.compute( references=UpperCAmelCase__ , predictions=UpperCAmelCase__ , num_workers=args.num_workers ) print(F'''Results: {pass_at_k}''' ) # Save results to json file with open(args.output_file , """w""" ) as fp: json.dump(UpperCAmelCase__ , UpperCAmelCase__ ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # 
https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
702
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __magic_name__ : def __init__( self : Tuple , lowercase_ : Tuple , lowercase_ : Any=3 , lowercase_ : int=32 , lowercase_ : str=3 , lowercase_ : int=10 , lowercase_ : Any=[8, 16, 32, 64] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Any=True , lowercase_ : int=True , lowercase_ : Any="relu" , lowercase_ : List[Any]=3 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : List[str]=1 , ): lowercase_ : Any = parent lowercase_ : str = batch_size lowercase_ : Any = image_size lowercase_ : Optional[Any] = num_channels lowercase_ : Any = embeddings_size lowercase_ : Union[str, Any] = hidden_sizes lowercase_ : Any = depths lowercase_ : Dict = is_training lowercase_ : Tuple = use_labels lowercase_ : str = hidden_act lowercase_ : Optional[Any] = num_labels lowercase_ : Tuple = scope lowercase_ : Any = len(lowercase_ ) lowercase_ : Optional[Any] = out_features lowercase_ : Tuple = out_indices lowercase_ : str = num_groups def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ : List[Any] = None 
if self.use_labels: lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels ) lowercase_ : int = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] ): lowercase_ : Optional[int] = BitModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : List[Any] = model(lowercase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] ): lowercase_ : Union[str, Any] = self.num_labels lowercase_ : Tuple = BitForImageClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Any = model(lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] ): lowercase_ : Any = BitBackbone(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Dict = model(lowercase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone 
works with out_features=None lowercase_ : List[str] = None lowercase_ : Dict = BitBackbone(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Tuple = model(lowercase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Optional[int] = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs lowercase_ : Any = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () UpperCamelCase__ = ( {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification} if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : int = BitModelTester(self ) lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE_ ( self : List[str] ): return @unittest.skip(reason="""Bit does not output attentions""" ) def 
SCREAMING_SNAKE_CASE_ ( self : Any ): pass @unittest.skip(reason="""Bit does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE_ ( self : Any ): pass @unittest.skip(reason="""Bit does not support input and output embeddings""" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): pass def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Optional[Any] = model_class(lowercase_ ) lowercase_ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : Union[str, Any] = [*signature.parameters.keys()] lowercase_ : Optional[int] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : List[Any] = model_class(config=lowercase_ ) for name, module in model.named_modules(): if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ): lowercase_ : Optional[Any] = model_class(lowercase_ ) model.to(lowercase_ ) 
model.eval() with torch.no_grad(): lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase_ : Optional[int] = self.model_tester.num_stages self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : Dict = ["""preactivation""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: lowercase_ : Union[str, Any] = layer_type lowercase_ : Optional[Any] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : Union[str, Any] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) @unittest.skip(reason="""Bit does not use feedforward chunking""" ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): pass def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def lowerCamelCase ( ) -> Optional[Any]: lowercase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): return ( 
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ ) lowercase_ : int = self.default_image_processor lowercase_ : List[Any] = prepare_img() lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ ) # forward pass with torch.no_grad(): lowercase_ : str = model(**lowercase_ ) # verify the logits lowercase_ : Optional[int] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowercase_ ) lowercase_ : Union[str, Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) ) @require_torch class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = (BitBackbone,) if is_torch_available() else () UpperCamelCase__ = BitConfig UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Union[str, Any] = BitModelTester(self )
30
0
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class __magic_name__ ( unittest.TestCase): UpperCamelCase__ = JukeboxTokenizer UpperCamelCase__ = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def SCREAMING_SNAKE_CASE_ ( self : List[str] ): import torch lowercase_ : str = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" ) lowercase_ : str = tokenizer(**self.metas )["""input_ids"""] # fmt: off lowercase_ : Tuple = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 
64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 
38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : Any ): import torch lowercase_ : Tuple = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" ) lowercase_ : str = tokenizer(**self.metas )["""input_ids"""] # fmt: off lowercase_ : Dict = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 
27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) 
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
703
'''simple docstring''' from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge _lowercase : Optional[Any] = [ "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the" " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe" " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.", "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal" " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's" " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the" " body.", "Amnesty International releases its annual report on the death penalty. The report catalogs the use of" " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the" " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital" " punishment.", ] _lowercase : List[Any] = [ "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ." " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz" " had informed his Lufthansa training school of an episode of severe depression, airline says .", "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ." " Israel and the United States opposed the move, which could open the door to war crimes investigations against" " Israelis .", "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to" " death . 
Organization claims that governments around the world are using the threat of terrorism to advance" " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death" " sentences up by 28% .", ] def lowerCamelCase ( ) -> List[str]: lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] ) assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] ) assert ( pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean() ) def lowerCamelCase ( ) -> Optional[Any]: lowercase_ : Tuple = """rougeLsum""" lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k] lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k] assert score > score_no_sep def lowerCamelCase ( ) -> List[Any]: lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""] lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ ) lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ ) assert score_sep == score_no_sep def lowerCamelCase ( ) -> Optional[Any]: lowercase_ : Union[str, Any] = [ """Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""", """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""", ] lowercase_ : List[str] = [ """Margot Frank, died in 1945, a month earlier than previously thought.""", """Prosecutor: \"No videos were used in the crash 
investigation\" German papers say they saw a cell phone video of""" """ the final seconds on board Flight 9525.""", ] assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) def lowerCamelCase ( ) -> Union[str, Any]: lowercase_ : Optional[Any] = [ """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """ ] lowercase_ : List[Any] = [ """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .""" ] lowercase_ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""] lowercase_ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""] assert new_score > prev_score def lowerCamelCase ( ) -> Tuple: lowercase_ : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" ) lowercase_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) ) assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : Union[str, Any] = calculate_rouge_path( data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase__ ) assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
30
0
'''simple docstring''' def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str: if not (isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )): raise ValueError("""longest_common_substring() takes two strings for inputs""" ) lowercase_ : Optional[int] = len(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = len(UpperCAmelCase__ ) lowercase_ : str = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )] lowercase_ : Union[str, Any] = 0 lowercase_ : Dict = 0 for i in range(1 , texta_length + 1 ): for j in range(1 , texta_length + 1 ): if texta[i - 1] == texta[j - 1]: lowercase_ : int = 1 + dp[i - 1][j - 1] if dp[i][j] > ans_length: lowercase_ : List[Any] = i lowercase_ : str = dp[i][j] return texta[ans_index - ans_length : ans_index] if __name__ == "__main__": import doctest doctest.testmod()
704
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase : Optional[Any] = logging.get_logger(__name__) _lowercase : Union[str, Any] = { "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''speech_to_text''' UpperCamelCase__ = ['''past_key_values'''] UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : str , lowercase_ : Optional[int]=10000 , lowercase_ : int=12 , lowercase_ : Any=2048 , lowercase_ : Any=4 , lowercase_ : Dict=6 , lowercase_ : Any=2048 , lowercase_ : List[str]=4 , lowercase_ : str=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=True , lowercase_ : int="relu" , lowercase_ : str=256 , lowercase_ : int=0.1 , lowercase_ : int=0.0 , lowercase_ : str=0.0 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=1 , lowercase_ : Dict=0 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=6000 , lowercase_ : Tuple=1024 , lowercase_ : str=2 , lowercase_ : Any=(5, 5) , lowercase_ : Union[str, Any]=1024 , lowercase_ : Dict=80 , lowercase_ : List[Any]=1 , **lowercase_ : int , ): lowercase_ : List[Any] = vocab_size lowercase_ : str = d_model lowercase_ : List[Any] = encoder_ffn_dim lowercase_ : str = encoder_layers lowercase_ : Dict = encoder_attention_heads lowercase_ : str = decoder_ffn_dim lowercase_ : int = decoder_layers lowercase_ : Any = decoder_attention_heads lowercase_ : Any = dropout lowercase_ : Dict = attention_dropout lowercase_ : Optional[int] = activation_dropout lowercase_ : Any = activation_function lowercase_ : Union[str, Any] = init_std lowercase_ : str = encoder_layerdrop lowercase_ : 
Optional[int] = decoder_layerdrop lowercase_ : Dict = use_cache lowercase_ : Union[str, Any] = encoder_layers lowercase_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True lowercase_ : Dict = max_source_positions lowercase_ : Optional[int] = max_target_positions lowercase_ : Tuple = num_conv_layers lowercase_ : Tuple = list(lowercase_ ) lowercase_ : Union[str, Any] = conv_channels lowercase_ : str = input_feat_per_channel lowercase_ : str = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( """Configuration for convolutional module is incorrect. """ """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """ f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ''' f'''`config.num_conv_layers = {self.num_conv_layers}`.''' ) super().__init__( pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
30
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = KandinskyInpaintPipeline UpperCamelCase__ = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image'''] UpperCamelCase__ = [ '''prompt''', '''negative_prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''', ] UpperCamelCase__ = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''negative_prompt''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] UpperCamelCase__ = False @property def SCREAMING_SNAKE_CASE_ ( self : int ): return 32 @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): return 32 @property def SCREAMING_SNAKE_CASE_ ( self : Dict ): return self.time_input_dim @property def SCREAMING_SNAKE_CASE_ ( self : str ): return self.time_input_dim * 4 @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): return 100 @property def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Optional[int] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" ) return tokenizer @property def SCREAMING_SNAKE_CASE_ ( self : int ): torch.manual_seed(0 ) lowercase_ : int = MCLIPConfig( numDims=self.cross_attention_dim , 
transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) lowercase_ : List[str] = MultilingualCLIP(lowercase_ ) lowercase_ : Any = text_encoder.eval() return text_encoder @property def SCREAMING_SNAKE_CASE_ ( self : Dict ): torch.manual_seed(0 ) lowercase_ : int = { """in_channels""": 9, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """text_image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """text_image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } lowercase_ : Dict = UNetaDConditionModel(**lowercase_ ) return model @property def SCREAMING_SNAKE_CASE_ ( self : Tuple ): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): torch.manual_seed(0 ) lowercase_ : Optional[Any] = VQModel(**self.dummy_movq_kwargs ) return model def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : int = self.dummy_text_encoder lowercase_ : int = self.dummy_tokenizer lowercase_ : List[str] = self.dummy_unet lowercase_ : int 
= self.dummy_movq lowercase_ : Optional[Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=lowercase_ , ) lowercase_ : Any = { """text_encoder""": text_encoder, """tokenizer""": tokenizer, """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Dict=0 ): lowercase_ : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) lowercase_ : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowercase_ ) # create init_image lowercase_ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) lowercase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase_ : Dict = Image.fromarray(np.uinta(lowercase_ ) ).convert("""RGB""" ).resize((256, 256) ) # create mask lowercase_ : Tuple = np.ones((64, 64) , dtype=np.floataa ) lowercase_ : Any = 0 if str(lowercase_ ).startswith("""mps""" ): lowercase_ : Optional[Any] = torch.manual_seed(lowercase_ ) else: lowercase_ : List[Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowercase_ : Union[str, Any] = { """prompt""": """horse""", """image""": init_image, """mask_image""": mask, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 2, """guidance_scale""": 4.0, """output_type""": """np""", } return inputs def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = """cpu""" lowercase_ : Tuple = self.get_dummy_components() lowercase_ : List[Any] = self.pipeline_class(**lowercase_ ) lowercase_ : Dict = pipe.to(lowercase_ ) 
pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Union[str, Any] = pipe(**self.get_dummy_inputs(lowercase_ ) ) lowercase_ : Dict = output.images lowercase_ : str = pipe( **self.get_dummy_inputs(lowercase_ ) , return_dict=lowercase_ , )[0] lowercase_ : List[Any] = image[0, -3:, -3:, -1] lowercase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] print(f'''image.shape {image.shape}''' ) assert image.shape == (1, 64, 64, 3) lowercase_ : Optional[Any] = np.array( [0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" ) lowercase_ : str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) lowercase_ : Dict = np.ones((768, 768) , dtype=np.floataa ) lowercase_ : Union[str, Any] = 0 lowercase_ : Optional[Any] = """a hat""" lowercase_ : List[str] = KandinskyPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(lowercase_ ) lowercase_ : Optional[Any] = KandinskyInpaintPipeline.from_pretrained( 
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa ) lowercase_ : Tuple = pipeline.to(lowercase_ ) pipeline.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowercase_ : Any = pipe_prior( lowercase_ , generator=lowercase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() lowercase_ : Optional[int] = pipeline( lowercase_ , image=lowercase_ , mask_image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , ) lowercase_ : Optional[int] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowercase_ , lowercase_ )
705
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __magic_name__ : def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=13 , lowercase_ : List[str]=10 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=2 , lowercase_ : Optional[Any]=2 , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : str=4 , lowercase_ : Dict=37 , lowercase_ : Tuple="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=10 , lowercase_ : Tuple=0.02 , lowercase_ : Any="divided_space_time" , lowercase_ : Tuple=None , ): lowercase_ : int = parent lowercase_ : str = batch_size lowercase_ : List[str] = image_size lowercase_ : str = num_channels lowercase_ : List[Any] = patch_size lowercase_ : Optional[Any] = num_frames lowercase_ : Dict = is_training lowercase_ : int = use_labels lowercase_ : List[str] = hidden_size lowercase_ : Dict = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Any = intermediate_size lowercase_ : Optional[int] = hidden_act lowercase_ 
: Optional[Any] = hidden_dropout_prob lowercase_ : List[Any] = attention_probs_dropout_prob lowercase_ : Any = attention_type lowercase_ : Union[str, Any] = initializer_range lowercase_ : List[str] = scope lowercase_ : Optional[int] = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token lowercase_ : Dict = (image_size // patch_size) ** 2 lowercase_ : List[Any] = (num_frames) * self.num_patches_per_frame + 1 def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Optional[Any] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) lowercase_ : int = None if self.use_labels: lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels ) lowercase_ : Optional[Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : int = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) lowercase_ : Any = self.num_labels return config def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[str] ): lowercase_ : Optional[Any] = TimesformerModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : int = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str ): 
lowercase_ : Dict = TimesformerForVideoClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : int = model(lowercase_ ) # verify the logits shape lowercase_ : List[Any] = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : List[str] = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ : int = config_and_inputs lowercase_ : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () UpperCamelCase__ = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = TimesformerModelTester(self ) lowercase_ : Union[str, Any] = ConfigTester( self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Tuple=False ): lowercase_ : List[Any] = copy.deepcopy(lowercase_ ) if return_labels: if model_class in get_values(lowercase_ ): lowercase_ : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_ ) return inputs_dict def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): pass def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: 
lowercase_ : str = model_class(lowercase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase_ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Dict = model_class(lowercase_ ) lowercase_ : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : Union[str, Any] = [*signature.parameters.keys()] lowercase_ : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Any ): for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : Any = TimesformerModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): if not self.has_attentions: pass else: lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : List[str] = True for model_class in self.all_model_classes: lowercase_ : str = self.model_tester.seq_length lowercase_ : int = self.model_tester.num_frames lowercase_ : int = True lowercase_ : Any = False lowercase_ : str = True lowercase_ : int = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : List[str] = outputs.attentions self.assertEqual(len(lowercase_ ) , 
self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase_ : List[str] = True lowercase_ : str = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : int = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) lowercase_ : Optional[Any] = len(lowercase_ ) # Check attention is always last and order is fine lowercase_ : Tuple = True lowercase_ : Dict = True lowercase_ : str = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) self.assertEqual(out_len + 1 , len(lowercase_ ) ) lowercase_ : Optional[Any] = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict ): lowercase_ : List[str] = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : Dict = outputs.hidden_states lowercase_ : List[Any] = self.model_tester.num_hidden_layers + 1 
self.assertEqual(len(lowercase_ ) , lowercase_ ) lowercase_ : List[Any] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : List[str] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : Optional[int] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) def lowerCamelCase ( ) -> Optional[int]: lowercase_ : List[str] = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) lowercase_ : List[Any] = np.load(UpperCAmelCase__ ) return list(UpperCAmelCase__ ) @require_torch @require_vision class __magic_name__ ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE_ ( self : str ): # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to( lowercase_ ) lowercase_ : Optional[Any] = self.default_image_processor lowercase_ : Any = prepare_video() lowercase_ : Optional[int] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase_ ) # forward pass with torch.no_grad(): lowercase_ : Optional[Any] = model(**lowercase_ ) # verify the logits lowercase_ : Any = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , lowercase_ ) lowercase_ : int = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
30
0
"""ALBERT model configuration, plus its ONNX export configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


# Canonical checkpoint name -> hosted config.json URL.
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    """Configuration for an ALBERT model.

    Stores the hyper-parameters that define the model architecture. Instantiating
    the config with no arguments yields a configuration matching the
    albert-xxlarge-v2 architecture. Special-token ids are forwarded to
    :class:`PretrainedConfig`.

    Args:
        vocab_size: Size of the token vocabulary.
        embedding_size: Dimensionality of the (factorized) token embeddings.
        hidden_size: Dimensionality of the encoder layers and pooler.
        num_hidden_layers: Number of hidden layers in the encoder.
        num_hidden_groups: Number of groups of layers sharing parameters.
        num_attention_heads: Number of attention heads per layer.
        intermediate_size: Dimensionality of the feed-forward layer.
        inner_group_num: Number of inner repetitions per hidden group.
        hidden_act: Activation function name (or callable) in the encoder/pooler.
        hidden_dropout_prob: Dropout probability for fully-connected layers.
        attention_probs_dropout_prob: Dropout probability on attention weights.
        max_position_embeddings: Maximum supported sequence length.
        type_vocab_size: Vocabulary size of `token_type_ids`.
        initializer_range: Std-dev of the truncated-normal weight initializer.
        layer_norm_eps: Epsilon used by layer-normalization layers.
        classifier_dropout_prob: Dropout probability for attached classifier heads.
        position_embedding_type: "absolute", "relative_key", or "relative_key_query".
    """

    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # NOTE: attributes must be set on `self` so the config serializes and is
        # readable by the model (the mangled original assigned them to a local).
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    """ONNX export configuration for ALBERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Return the ONNX input spec; multiple-choice tasks carry an extra choice axis."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
706
"""PyTorch RegNet model (backbone + image-classification head)."""
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class RegNetConvLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> optional activation, with `same`-style padding."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        # padding = kernel_size // 2 keeps spatial size when stride == 1.
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,  # BatchNorm provides the affine shift
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    """Stem: a single stride-2 conv that embeds raw pixels."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


# Copied from transformers.models.resnet.modeling_resnet.ResNetShortCut with ResNet->RegNet
class RegNetShortCut(nn.Module):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation layer: channel-wise attention from pooled features."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state


class RegNetXLayer(nn.Module):
    """RegNet X residual layer: 1x1 -> grouped 3x3 -> 1x1 with a shortcut."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    """RegNet Y residual layer: an X layer with a Squeeze-and-Excitation block."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    """A stage of `depth` X or Y layers; the first layer downsamples via its stride."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    """Stacks the configured stages and optionally collects hidden states."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage
        # may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # Kaiming init for convs; unit-weight/zero-bias for normalization layers.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value


REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (MSE); if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype/num_labels, then cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
30
0
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _lowercase : Optional[Any] = 16 _lowercase : int = 32 def lowerCamelCase ( UpperCAmelCase__ : Accelerator , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : str = "bert-base-cased" ) -> Tuple: lowercase_ : str = AutoTokenizer.from_pretrained(UpperCAmelCase__ ) lowercase_ : Optional[int] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(UpperCAmelCase__ : str ): # max_length=None => use the model max length (it's actually the default) lowercase_ : Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowercase_ : Optional[Any] = datasets.map( UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=UpperCAmelCase__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase_ : Tuple = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(UpperCAmelCase__ : List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(UpperCAmelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(UpperCAmelCase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. 
lowercase_ : Optional[int] = DataLoader( tokenized_datasets["""train"""] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ ) lowercase_ : str = DataLoader( tokenized_datasets["""validation"""] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ ) return train_dataloader, eval_dataloader def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ) -> Tuple: model.eval() lowercase_ : List[str] = 0 for step, batch in enumerate(UpperCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase_ : Union[str, Any] = model(**UpperCAmelCase__ ) lowercase_ : int = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times lowercase_ : List[Any] = accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(UpperCAmelCase__ ) - 1: lowercase_ : Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen] lowercase_ : List[str] = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=UpperCAmelCase__ , references=UpperCAmelCase__ , ) lowercase_ : List[str] = metric.compute() return eval_metric["accuracy"] def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict ) -> List[str]: # Initialize accelerator lowercase_ : Any = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase_ : List[Any] = config["""lr"""] lowercase_ : int = int(config["""num_epochs"""] ) lowercase_ : Dict = int(config["""seed"""] ) lowercase_ : Optional[Any] = int(config["""batch_size"""] ) lowercase_ : Tuple = args.model_name_or_path set_seed(UpperCAmelCase__ ) 
lowercase_ : Tuple = get_dataloaders(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase_ : int = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase__ , return_dict=UpperCAmelCase__ ) # Instantiate optimizer lowercase_ : Optional[int] = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowercase_ : str = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase__ ) if accelerator.state.deepspeed_plugin is not None: lowercase_ : Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: lowercase_ : List[Any] = 1 lowercase_ : List[str] = (len(UpperCAmelCase__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowercase_ : Tuple = get_linear_schedule_with_warmup( optimizer=UpperCAmelCase__ , num_warmup_steps=0 , num_training_steps=UpperCAmelCase__ , ) else: lowercase_ : str = DummyScheduler(UpperCAmelCase__ , total_num_steps=UpperCAmelCase__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowercase_ : Optional[int] = accelerator.prepare( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # We need to keep track of how many total steps we have iterated over lowercase_ : Any = 0 # We also need to keep track of the stating epoch so files are named properly lowercase_ : Optional[Any] = 0 lowercase_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" ) lowercase_ : Optional[Any] = num_epochs if args.partial_train_epoch is not None: lowercase_ : List[str] = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) lowercase_ : Optional[int] = args.resume_from_checkpoint.split("""epoch_""" )[1] lowercase_ : str = """""" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break lowercase_ : Union[str, Any] = int(UpperCAmelCase__ ) + 1 lowercase_ : Optional[Any] = evaluation_loop(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) accelerator.print("""resumed checkpoint performance:""" , UpperCAmelCase__ ) accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] ) accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] ) with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , """r""" ) as f: lowercase_ : Any = json.load(UpperCAmelCase__ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model lowercase_ : Optional[int] = {} for epoch in range(UpperCAmelCase__ , UpperCAmelCase__ ): 
model.train() for step, batch in enumerate(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = model(**UpperCAmelCase__ ) lowercase_ : Dict = outputs.loss lowercase_ : List[str] = loss / gradient_accumulation_steps accelerator.backward(UpperCAmelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 lowercase_ : List[str] = F'''epoch_{epoch}''' lowercase_ : Dict = os.path.join(args.output_dir , UpperCAmelCase__ ) accelerator.save_state(UpperCAmelCase__ ) lowercase_ : Dict = evaluation_loop(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : Optional[int] = accuracy lowercase_ : List[str] = lr_scheduler.get_lr()[0] lowercase_ : List[str] = optimizer.param_groups[0]["""lr"""] lowercase_ : List[str] = epoch lowercase_ : List[str] = overall_step accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , """w""" ) as f: json.dump(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( ) -> str: lowercase_ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=UpperCAmelCase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=UpperCAmelCase__ , ) parser.add_argument( """--output_dir""" , type=UpperCAmelCase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--partial_train_epoch""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help="""If passed, the training will stop after this number of epochs.""" , ) parser.add_argument( """--num_epochs""" , type=UpperCAmelCase__ , default=2 , help="""Number of train epochs.""" , ) lowercase_ : Dict = parser.parse_args() lowercase_ : Tuple = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(UpperCAmelCase__ , UpperCAmelCase__ ) if __name__ == "__main__": main()
707
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : int = [ "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FocalNetForImageClassification", "FocalNetForMaskedImageModeling", "FocalNetBackbone", "FocalNetModel", "FocalNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
'''simple docstring''' from torch import nn class __magic_name__ ( nn.Module): def __init__( self : List[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] ): super().__init__() lowercase_ : Optional[Any] = class_size lowercase_ : str = embed_size # self.mlp1 = nn.Linear(embed_size, embed_size) # self.mlp2 = (nn.Linear(embed_size, class_size)) lowercase_ : Union[str, Any] = nn.Linear(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : List[str] ): # hidden_state = nn.functional.relu(self.mlp1(hidden_state)) # hidden_state = self.mlp2(hidden_state) lowercase_ : Dict = self.mlp(lowercase_ ) return logits
708
'''simple docstring''' import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] ) def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> Any: if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , UpperCAmelCase__ ) lowercase_ : List[Any] = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: lowercase_ : str = dataset_size < in_memory_max_size else: lowercase_ : List[Any] = False lowercase_ : Any = is_small_dataset(UpperCAmelCase__ ) assert result == expected
30
0
'''simple docstring''' import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Any = 0 @slow def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): lowercase_ : Tuple = AutoTokenizer.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(lowercase_ ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): lowercase_ : str = AutoTokenizer.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) self.assertIsInstance(lowercase_ , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(lowercase_ ) , 0 ) def SCREAMING_SNAKE_CASE_ ( 
self : Optional[Any] ): lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : int = AutoTokenizer.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 20 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : List[Any] = AutoConfig.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) # Check that tokenizer_type ≠ model_type lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase_ , config=lowercase_ ) self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def SCREAMING_SNAKE_CASE_ ( self : Any ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase_ , """vocab.txt""" ) ) lowercase_ : Dict = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type="""bert""" , use_fast=lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase_ , """vocab.json""" ) ) shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase_ , """merges.txt""" ) ) lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type="""gpt2""" , use_fast=lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) @require_tokenizers def SCREAMING_SNAKE_CASE_ ( self : Tuple ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase_ , """vocab.txt""" ) ) lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type="""bert""" ) self.assertIsInstance(lowercase_ , lowercase_ ) with 
tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase_ , """vocab.json""" ) ) shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase_ , """merges.txt""" ) ) lowercase_ : Tuple = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type="""gpt2""" ) self.assertIsInstance(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): with pytest.raises(lowercase_ ): AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" ) @require_tokenizers def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: lowercase_ : Dict = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" ) self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) ) if isinstance(lowercase_ , lowercase_ ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase_ ) else: self.assertEqual(tokenizer.do_lower_case , lowercase_ ) self.assertEqual(tokenizer.model_max_length , 512 ) @require_tokenizers def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( lowercase_ , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ): lowercase_ : Union[str, Any] = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. 
models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai lowercase_ : Union[str, Any] = TOKENIZER_MAPPING.values() lowercase_ : Union[str, Any] = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(lowercase_ ) @require_tokenizers def SCREAMING_SNAKE_CASE_ ( self : Tuple ): self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase_ ) , lowercase_ ) self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase_ ) @require_tokenizers def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : List[str] = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase_ ) lowercase_ : Union[str, Any] = """Hello, world. How are you?""" lowercase_ : Optional[Any] = tokenizer.tokenize(lowercase_ ) self.assertEqual("""[UNK]""" , tokens[0] ) lowercase_ : Optional[int] = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase_ ) lowercase_ : Optional[int] = tokenizer.tokenize(lowercase_ ) self.assertEqual("""[UNK]""" , tokens[0] ) @require_tokenizers def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : List[str] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" ) self.assertEqual(type(lowercase_ ) , lowercase_ ) self.assertEqual(tokenizer.model_max_length , 512 ) self.assertEqual(tokenizer.vocab_size , 30000 ) self.assertEqual(tokenizer.unk_token , """[UNK]""" ) self.assertEqual(tokenizer.padding_side , """right""" ) self.assertEqual(tokenizer.truncation_side , """right""" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Tuple = AutoTokenizer.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) ) with 
tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase_ ) lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 12 ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = AutoTokenizer.from_pretrained("""ctrl""" ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): # Check we can load the tokenizer config of an online model. lowercase_ : str = get_tokenizer_config("""bert-base-cased""" ) lowercase_ : Optional[Any] = config.pop("""_commit_hash""" , lowercase_ ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(lowercase_ , {"""do_lower_case""": False} ) # This model does not have a tokenizer_config so we get back an empty dict. lowercase_ : Optional[Any] = get_tokenizer_config(lowercase_ ) self.assertDictEqual(lowercase_ , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. lowercase_ : Optional[int] = AutoTokenizer.from_pretrained(lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase_ ) lowercase_ : Tuple = get_tokenizer_config(lowercase_ ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): try: AutoConfig.register("""custom""" , lowercase_ ) AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowercase_ ): AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ ) lowercase_ : int = CustomTokenizer.from_pretrained(lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase_ ) lowercase_ : str = AutoTokenizer.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): try: AutoConfig.register("""custom""" , lowercase_ ) # Can register in two steps AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( lowercase_ , slow_tokenizer_class=lowercase_ , fast_tokenizer_class=lowercase_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowercase_ ): AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_ ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with 
tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained(lowercase_ ) bert_tokenizer.save_pretrained(lowercase_ ) lowercase_ : Dict = CustomTokenizerFast.from_pretrained(lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase_ ) lowercase_ : int = AutoTokenizer.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) lowercase_ : List[str] = AutoTokenizer.from_pretrained(lowercase_ , use_fast=lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE_ ( self : int ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowercase_ ): lowercase_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowercase_ ): lowercase_ : Dict = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ ) lowercase_ : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase_ ) lowercase_ : List[Any] = AutoTokenizer.from_pretrained(lowercase_ , trust_remote_code=lowercase_ ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version lowercase_ : List[Any] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ , use_fast=lowercase_ ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase_ ) lowercase_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase_ , trust_remote_code=lowercase_ , use_fast=lowercase_ ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" ) @require_tokenizers def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = False class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = NewTokenizer UpperCamelCase__ = False try: AutoConfig.register("""custom""" , lowercase_ ) AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ ) AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_ ) # If remote code is not set, the default is to use local lowercase_ : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) 
self.assertFalse(tokenizer.special_attribute_present ) lowercase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase_ ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. lowercase_ : Optional[int] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertFalse(tokenizer.special_attribute_present ) lowercase_ : List[Any] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ , use_fast=lowercase_ ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertTrue(tokenizer.special_attribute_present ) lowercase_ : Tuple = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ , use_fast=lowercase_ ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Optional[int] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase_ ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): 
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version lowercase_ : Any = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase_ , use_fast=lowercase_ ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) def SCREAMING_SNAKE_CASE_ ( self : Any ): with self.assertRaisesRegex( lowercase_ , """bert-base is not a local folder and is not a valid model identifier""" ): lowercase_ : Optional[int] = AutoTokenizer.from_pretrained("""bert-base""" ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): with self.assertRaisesRegex( lowercase_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): lowercase_ : List[str] = AutoTokenizer.from_pretrained(lowercase_ , revision="""aaaaaa""" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): # Make sure we have cached the tokenizer. lowercase_ : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) with RequestCounter() as counter: lowercase_ : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
709
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" ) lowercase_ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" ) lowercase_ : Union[str, Any] = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids lowercase_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids lowercase_ : Optional[Any] = model(lowercase_ , labels=lowercase_ ).loss lowercase_ : Optional[int] = -tf.math.reduce_mean(lowercase_ ).numpy() lowercase_ : Optional[int] = -21.22_81_68 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
30
0
'''simple docstring''' def lowerCamelCase ( UpperCAmelCase__ : float ) -> float: if edge <= 0 or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise ValueError("""Length must be a positive.""" ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def lowerCamelCase ( UpperCAmelCase__ : float ) -> float: if edge <= 0 or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise ValueError("""Length must be a positive.""" ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
710
'''simple docstring'''
from collections.abc import Callable

import numpy as np


def lowerCamelCase(
    ode_func: Callable,
    ya: float,
    xa: float,
    step_size: float,
    x_end: float,
) -> np.array:
    """Integrate ``y' = ode_func(x, y)`` over ``[xa, x_end]`` with Heun's method.

    Heun's method (explicit trapezoidal rule) is a second-order Runge-Kutta
    scheme: an Euler predictor followed by a trapezoidal corrector.
    (The original signature repeated one parameter name five times, which is a
    SyntaxError; the names below are the ones the body actually references.)

    Args:
        ode_func: right-hand side ``f(x, y)`` of the ODE.
        ya: initial value ``y(xa)``.
        xa: initial abscissa.
        step_size: increment between grid points (assumed positive).
        x_end: final abscissa.

    Returns:
        Array of the ``n + 1`` approximated values, with ``y[0] == ya``.
    """
    # Number of steps needed to reach (or pass) x_end.
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # Predictor: one explicit Euler step.
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at both ends of the interval.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y[k + 1]))
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
30
0
'''simple docstring'''


def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True if no already-colored neighbour of the vertex uses ``color``.

    ``neighbours`` is one adjacency-matrix row (1 = edge present).
    """
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(
    graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int
) -> bool:
    """Backtracking helper: try to color vertices ``index .. n-1`` in place.

    (The original renamed these helpers away from ``valid_coloring`` /
    ``util_color`` while the call sites kept the old names, and repeated a
    single parameter name, which is a SyntaxError; both restored here.)
    """
    # Base Case: every vertex has been assigned a color.
    if index == len(graph):
        return True

    # Recursive Step: try each color on the current vertex.
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def lowerCamelCase(graph: list[list[int]], max_colors: int) -> list[int]:
    """Graph m-coloring: return one valid assignment of colors 0..max_colors-1
    per vertex, or ``[]`` when no such coloring exists.

    ``graph`` is an adjacency matrix; vertex i's color is the i-th entry.
    """
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
711
'''simple docstring'''
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter

# NOTE(review): the seven factory functions below had all been renamed to the
# same identifier with duplicated parameter names (a SyntaxError), and their
# coefficient variables were collapsed into a single name. They are restored
# to the canonical biquad ("Audio EQ Cookbook") forms that the visible
# formulas correspond to.


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad for ``frequency`` Hz at ``samplerate``."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad (constant skirt gain)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad (flat magnitude, phase shift only)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # For an all-pass, the feedback coefficients are the feedforward ones reversed.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a 2nd-order peaking-EQ biquad boosting/cutting by ``gain_db``."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a 2nd-order low-shelf biquad with ``gain_db`` shelf gain."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a 2nd-order high-shelf biquad with ``gain_db`` shelf gain."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
30
0
'''simple docstring'''
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

# Modalities a tool is allowed to declare for its inputs/outputs.
# (Restored name: the class below references ``authorized_types``.)
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    """Build one dummy input per requested modality ('text'/'image'/'audio').

    Nested lists of modalities recurse into nested lists of inputs.
    (Restored name: the recursive call and the test methods use it.)
    """
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("""Text input""")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""")) / """000000039769.png""").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f'''Invalid type requested: {input_type}''')
    return inputs


def output_types(outputs: List):
    """Map each produced output object back to its modality name."""
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("""text""")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("""image""")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("""audio""")
        else:
            raise ValueError(f'''Invalid output: {output}''')
    return output_types


@is_tool_test
class __magic_name__:
    """Mixin exercising the common contract of a ``self.tool`` under test.

    Method names restored to ``test_*`` so the test runner discovers them
    (the originals all shared one non-test name and shadowed each other).
    """

    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, """inputs"""))
        self.assertTrue(hasattr(self.tool, """outputs"""))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, """description"""))
        self.assertTrue(hasattr(self.tool, """default_checkpoint"""))
        self.assertTrue(self.tool.description.startswith("""This is a tool that"""))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
712
'''simple docstring'''
import os
import posixpath
import shutil  # used by write_arrow's working-dir cleanup; was missing
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int

# Restored name: the code below calls ``logger.debug`` / ``logger.error``.
logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for datasets built from a Spark DataFrame."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    """Return a generator factory yielding (key, example) pairs partition by
    partition, in the given partition order."""
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("""*""", pyspark.sql.functions.spark_partition_id().alias("""part_id"""))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("""*""").where(f'''part_id = {partition_id}''').drop("""part_id""")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'''{partition_id}_{row_id}''', row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    """ExamplesIterable backed by a Spark DataFrame; one shard per partition."""

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        # Shuffle the partition order rather than the rows, keeping shards intact.
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)


class __magic_name__(datasets.DatasetBuilder):
    """DatasetBuilder that materializes a Spark DataFrame as an Arrow/Parquet dataset."""

    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        """On multi-node clusters, check workers can reach the cache_dir."""

        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            # Fixed: ``uuid.uuid4`` (the original had the digit mangled away).
            probe_file = os.path.join(self._cache_dir, """fs_test""" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, """a""")
            return [probe_file]

        if self._spark.conf.get("""spark.master""", """""").startswith("""local"""):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"""
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        """Repartition so each partition's Arrow size is ~<= max_shard_size."""
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, """batch_bytes: long""")
            .agg(pyspark.sql.functions.sum("""batch_bytes""").alias("""sample_bytes"""))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, Tuple[int, int, int, List[int]]]]:
        """Write shards on the executors; yield per-task (examples, bytes, shards, lengths)."""
        import pyspark

        writer_class = ParquetWriter if file_format == """parquet""" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == """parquet"""

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["""task_id""", """num_examples""", """num_bytes"""],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("""SSSSS""", f'''{shard_id:05d}''').replace("""TTTTT""", f'''{task_id:05d}'''),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["""task_id""", """num_examples""", """num_bytes"""],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("""SSSSS""", f'''{shard_id:05d}''').replace("""TTTTT""", f'''{task_id:05d}'''),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["""task_id""", """num_examples""", """num_bytes"""],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, """task_id: long, num_examples: long, num_bytes: long""")
            .groupBy("""task_id""")
            .agg(
                pyspark.sql.functions.sum("""num_examples""").alias("""total_num_examples"""),
                pyspark.sql.functions.sum("""num_bytes""").alias("""total_num_bytes"""),
                pyspark.sql.functions.count("""num_bytes""").alias("""num_shards"""),
                pyspark.sql.functions.collect_list("""num_examples""").alias("""shard_lengths"""),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        """Drive the distributed write, then rename shards into their final names."""
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = """-TTTTT-SSSSS-of-NNNNN"""
        fname = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f'''Renaming {total_shards} shards.''')
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("""SSSSS""", f'''{shard_id:05d}''').replace("""TTTTT""", f'''{task_id:05d}'''),
                    fpath.replace("""TTTTT-SSSSS""", f'''{global_shard_id:05d}''').replace("""NNNNN""", f'''{total_shards:05d}'''),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda a: _rename_shard(*a)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("""SSSSS""", f'''{shard_id:05d}''').replace("""TTTTT""", f'''{task_id:05d}'''),
                fpath.replace(SUFFIX, """"""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
30
0
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging

if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

# Restored constant names: the class body references them below.
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}


class __magic_name__(PreTrainedTokenizer):
    """SentencePiece tokenizer for the GPT-SW3 models.

    Restored from a mangled copy: the base class was an undefined name
    (``PreTrainedTokenizer`` is what is imported above), ``__init__`` repeated
    one parameter name (a SyntaxError), and every method shared a single name
    so the tokenizer API (``_tokenize``, ``save_vocabulary``, ...) was absent.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("""name_or_path""")
        if name_or_path is None:
            logger.warning(
                """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
                """ you are testing the model, this can safely be ignored"""
            )
            name_or_path = """None"""

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = """<|endoftext|>""" if eos_token is None else eos_token
        unk_token = """<unk>""" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = """<pad>""" if pad_token is None else pad_token
            bos_token = """<s>""" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt : off
        self.whitespaces = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """„"""}
        # fmt : on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        # (``chr`` restored: it had been replaced by an undefined identifier.)
        self.non_printing_characters_re = re.compile(
            f'''[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]'''
        )

    def __getstate__(self):
        # Drop the unpicklable SentencePiece processor; rebuilt in __setstate__.
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Strip non-printing characters, normalize whitespace variants, NFC-normalize."""
        text = self.non_printing_characters_re.sub("""""", text)
        # Normalize whitespaces
        text = """""".join([char if char not in self.whitespaces else """ """ for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("""NFC""", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        # Overridden: GPT-SW3 applies no extra decode-time cleanup.
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join sentencepiece tokens, decoding special tokens verbatim."""
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encode text (or a batch) straight through SentencePiece, skipping the
        slow tokenizer machinery; optionally return a torch tensor."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Serialize a chat as ``<eos><bos>User/Bot turns ... <bos>Bot:`` and encode it."""
        all_responses = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
        prompt = (
            f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(all_responses) + f'''{self.bos_token}Bot:'''
        )
        return self.encode(text=prompt)
713
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Lazy-import structure: maps submodule name -> public names it provides.
# (The original assigned each group to a throwaway variable, leaving
# ``_import_structure`` undefined when handed to ``_LazyModule`` below.)
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
'''simple docstring'''


def lowerCamelCase(n: int, r: int) -> int:
    """Compute the binomial coefficient C(n, r) with a rolling Pascal row.

    Time O(n*r), space O(r). Returns 0 when r > n.
    (The original signature repeated one parameter name — a SyntaxError; the
    body references ``n`` and ``r``, restored here.)
    """
    # c[j] holds C(i, j) for the row i processed so far.
    c = [0] * (r + 1)
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


if __name__ == "__main__":
    # The original called an undefined name (``binomial_coefficient``) at
    # import time; fixed to the defined function and guarded by __main__.
    print(lowerCamelCase(n=10, r=5))
714
'''simple docstring'''
# Restored constant name: translate_message references ``LETTERS``.
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    """Interactive driver: prompt for message/key/mode and print the result."""
    message = input("""Enter message: """)
    key = input("""Enter key [alphanumeric]: """)
    mode = input("""Encrypt/Decrypt [e/d]: """)

    if mode.lower().startswith("""e"""):
        mode = """encrypt"""
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("""d"""):
        mode = """decrypt"""
        translated = decrypt_message(key, message)

    print(f'''\n{mode.title()}ed message:''')
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    """Encrypt ``message`` with the Vigenère cipher under ``key``."""
    return translate_message(key, message, """encrypt""")


def decrypt_message(key: str, message: str) -> str:
    """Decrypt a Vigenère-ciphered ``message`` under ``key``."""
    return translate_message(key, message, """decrypt""")


def translate_message(key: str, message: str, mode: str) -> str:
    """Shift each letter of ``message`` by the matching ``key`` letter.

    Case is preserved; non-letters pass through unchanged and do not advance
    the key index. (Functions restored to the names their call sites use; the
    originals all shared one name with duplicated parameters — a SyntaxError.)
    """
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
30
0
'''simple docstring''' from typing import Any def lowerCamelCase ( UpperCAmelCase__ : list ) -> list[Any]: if not input_list: return [] lowercase_ : Dict = [input_list.count(UpperCAmelCase__ ) for value in input_list] lowercase_ : Optional[Any] = max(UpperCAmelCase__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(UpperCAmelCase__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
715
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : List[Any] = """ylacombe/bark-small""" lowercase_ : List[str] = tempfile.mkdtemp() lowercase_ : Tuple = """en_speaker_1""" lowercase_ : Union[str, Any] = """This is a test string""" lowercase_ : int = """speaker_embeddings_path.json""" lowercase_ : Any = """speaker_embeddings""" def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : Optional[int] ): return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Any = self.get_tokenizer() lowercase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ ) processor.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Any = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) lowercase_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowercase_ : Optional[Any] = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : 
Optional[int] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) lowercase_ : Optional[int] = 35 lowercase_ : int = 2 lowercase_ : Union[str, Any] = 8 lowercase_ : Union[str, Any] = { """semantic_prompt""": np.ones(lowercase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset lowercase_ : str = processor(text=self.input_string , voice_preset=lowercase_ ) lowercase_ : Dict = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file lowercase_ : Any = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(lowercase_ , **lowercase_ ) lowercase_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowercase_ ) lowercase_ : List[Any] = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub lowercase_ : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : List[str] = self.get_tokenizer() lowercase_ : int = BarkProcessor(tokenizer=lowercase_ ) lowercase_ : Any = processor(text=self.input_string ) lowercase_ : List[str] = tokenizer( self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
30
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _lowercase : Dict = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[int] = ["NllbTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = ["NllbTokenizerFast"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys _lowercase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
716
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=_UpperCAmelCase) class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = field(default='''image-classification''', metadata={'''include_in_asdict_even_if_is_default''': True}) UpperCamelCase__ = Features({'''image''': Image()}) UpperCamelCase__ = Features({'''labels''': ClassLabel}) UpperCamelCase__ = "image" UpperCamelCase__ = "labels" def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ): if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , lowercase_ ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) lowercase_ : List[str] = copy.deepcopy(self ) lowercase_ : List[str] = self.label_schema.copy() lowercase_ : List[Any] = features[self.label_column] lowercase_ : Optional[Any] = label_schema return task_template @property def SCREAMING_SNAKE_CASE_ ( self : int ): return { self.image_column: "image", self.label_column: "labels", }
30
0
'''simple docstring''' import importlib.metadata from typing import Union from packaging.version import Version, parse from .constants import STR_OPERATION_TO_FUNC _lowercase : Optional[Any] = parse(importlib.metadata.version("torch")) def lowerCamelCase ( UpperCAmelCase__ : Union[str, Version] , UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> Optional[Any]: if operation not in STR_OPERATION_TO_FUNC.keys(): raise ValueError(F'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' ) lowercase_ : Union[str, Any] = STR_OPERATION_TO_FUNC[operation] if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ : Optional[int] = parse(importlib.metadata.version(UpperCAmelCase__ ) ) return operation(UpperCAmelCase__ , parse(UpperCAmelCase__ ) ) def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> Optional[int]: return compare_versions(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
717
'''simple docstring''' import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ) -> List[Any]: lowercase_ : str = 1.5 lowercase_ : List[Any] = int(factor * num_class_images ) lowercase_ : int = ClipClient( url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 ) os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCAmelCase__ ) if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images: return while True: lowercase_ : List[str] = client.query(text=UpperCAmelCase__ ) if len(UpperCAmelCase__ ) >= factor * num_class_images or num_images > 1e4: break else: lowercase_ : List[str] = int(factor * num_images ) lowercase_ : List[str] = ClipClient( url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 , ) lowercase_ : List[str] = 0 lowercase_ : Dict = 0 lowercase_ : Tuple = tqdm(desc="""downloading real regularization images""" , total=UpperCAmelCase__ ) with open(F'''{class_data_dir}/caption.txt''' , """w""" ) as fa, open(F'''{class_data_dir}/urls.txt''' , """w""" ) as fa, open( F'''{class_data_dir}/images.txt''' , """w""" ) as fa: while total < num_class_images: lowercase_ : str = class_images[count] count += 1 try: lowercase_ : Union[str, Any] = requests.get(images["""url"""] ) if img.status_code == 200: lowercase_ : List[str] = Image.open(BytesIO(img.content ) ) with open(F'''{class_data_dir}/images/{total}.jpg''' , """wb""" ) as f: f.write(img.content ) fa.write(images["""caption"""] + """\n""" ) fa.write(images["""url"""] + """\n""" ) fa.write(F'''{class_data_dir}/images/{total}.jpg''' + """\n""" ) total += 1 pbar.update(1 ) else: continue except Exception: 
continue return def lowerCamelCase ( ) -> Optional[Any]: lowercase_ : Any = argparse.ArgumentParser("""""" , add_help=UpperCAmelCase__ ) parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ ) parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ ) parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=UpperCAmelCase__ ) return parser.parse_args() if __name__ == "__main__": _lowercase : Dict = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
30
0
'''simple docstring''' import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = (IPNDMScheduler,) UpperCamelCase__ = (('''num_inference_steps''', 50),) def SCREAMING_SNAKE_CASE_ ( self : Dict , **lowercase_ : Dict ): lowercase_ : Tuple = {"""num_train_timesteps""": 1000} config.update(**lowercase_ ) return config def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Optional[Any]=0 , **lowercase_ : Any ): lowercase_ : Union[str, Any] = dict(self.forward_default_kwargs ) lowercase_ : List[Any] = kwargs.pop("""num_inference_steps""" , lowercase_ ) lowercase_ : Any = self.dummy_sample lowercase_ : Optional[Any] = 0.1 * sample lowercase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase_ : Optional[Any] = self.get_scheduler_config(**lowercase_ ) lowercase_ : List[str] = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals lowercase_ : Optional[int] = dummy_past_residuals[:] if time_step is None: lowercase_ : List[str] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) lowercase_ : Any = scheduler_class.from_pretrained(lowercase_ ) new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals lowercase_ : Dict = dummy_past_residuals[:] lowercase_ : List[str] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample lowercase_ : Any = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase_ : str = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample lowercase_ : Dict = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ 
).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE_ ( self : int ): pass def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int=0 , **lowercase_ : Any ): lowercase_ : Any = dict(self.forward_default_kwargs ) lowercase_ : Dict = kwargs.pop("""num_inference_steps""" , lowercase_ ) lowercase_ : Tuple = self.dummy_sample lowercase_ : Optional[Any] = 0.1 * sample lowercase_ : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase_ : Any = self.get_scheduler_config() lowercase_ : Any = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals (must be after setting timesteps) lowercase_ : Any = dummy_past_residuals[:] if time_step is None: lowercase_ : int = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) lowercase_ : List[Any] = scheduler_class.from_pretrained(lowercase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residual (must be after setting timesteps) lowercase_ : Union[str, Any] = dummy_past_residuals[:] lowercase_ : int = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample lowercase_ : Dict = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase_ : Union[str, Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample lowercase_ : Optional[Any] = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , **lowercase_ : List[Any] ): lowercase_ : 
str = self.scheduler_classes[0] lowercase_ : str = self.get_scheduler_config(**lowercase_ ) lowercase_ : Dict = scheduler_class(**lowercase_ ) lowercase_ : List[Any] = 10 lowercase_ : int = self.dummy_model() lowercase_ : Any = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): lowercase_ : str = model(lowercase_ , lowercase_ ) lowercase_ : str = scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample for i, t in enumerate(scheduler.timesteps ): lowercase_ : Dict = model(lowercase_ , lowercase_ ) lowercase_ : Dict = scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample return sample def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = dict(self.forward_default_kwargs ) lowercase_ : Union[str, Any] = kwargs.pop("""num_inference_steps""" , lowercase_ ) for scheduler_class in self.scheduler_classes: lowercase_ : Optional[int] = self.get_scheduler_config() lowercase_ : Tuple = scheduler_class(**lowercase_ ) lowercase_ : List[str] = self.dummy_sample lowercase_ : Dict = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase_ , """set_timesteps""" ): scheduler.set_timesteps(lowercase_ ) elif num_inference_steps is not None and not hasattr(lowercase_ , """set_timesteps""" ): lowercase_ : List[Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowercase_ : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] lowercase_ : Tuple = dummy_past_residuals[:] lowercase_ : int = scheduler.timesteps[5] lowercase_ : List[Any] = scheduler.timesteps[6] lowercase_ : str = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample lowercase_ : List[Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) lowercase_ : Dict = scheduler.step(lowercase_ , 
lowercase_ , lowercase_ , **lowercase_ ).prev_sample lowercase_ : List[Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE_ ( self : Any ): for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=lowercase_ , time_step=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=lowercase_ , time_step=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Optional[Any] = self.full_loop() lowercase_ : str = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 2540529 ) < 10
718
'''simple docstring''' from __future__ import annotations def lowerCamelCase ( UpperCAmelCase__ : list , UpperCAmelCase__ : int | None = None , UpperCAmelCase__ : int | None = None ) -> None: if start is None: lowercase_ : Any = 0 if end is None: lowercase_ : List[Any] = len(UpperCAmelCase__ ) - 1 if start >= end: return lowercase_ : Optional[int] = (start + end) // 2 slowsort(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) slowsort(UpperCAmelCase__ , mid + 1 , UpperCAmelCase__ ) if sequence[end] < sequence[mid]: lowercase_ , lowercase_ : Dict = sequence[mid], sequence[end] slowsort(UpperCAmelCase__ , UpperCAmelCase__ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
30
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase : Tuple = logging.get_logger(__name__) _lowercase : Any = { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json" ), "distilbert-base-uncased-finetuned-sst-2-english": ( "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json" ), } class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''distilbert''' UpperCamelCase__ = { '''hidden_size''': '''dim''', '''num_attention_heads''': '''n_heads''', '''num_hidden_layers''': '''n_layers''', } def __init__( self : List[Any] , lowercase_ : str=30522 , lowercase_ : List[Any]=512 , lowercase_ : Any=False , lowercase_ : List[str]=6 , lowercase_ : str=12 , lowercase_ : Optional[Any]=768 , lowercase_ : Any=4 * 768 , lowercase_ : Optional[int]=0.1 , lowercase_ : int=0.1 , lowercase_ : Any="gelu" , lowercase_ : Tuple=0.02 , lowercase_ : List[str]=0.1 , lowercase_ : Dict=0.2 , lowercase_ : str=0 , **lowercase_ : List[str] , ): lowercase_ : Dict = vocab_size lowercase_ : str = max_position_embeddings lowercase_ : Optional[Any] = sinusoidal_pos_embds lowercase_ : Optional[Any] = n_layers lowercase_ : List[Any] = n_heads lowercase_ : int = 
dim lowercase_ : str = hidden_dim lowercase_ : Optional[Any] = dropout lowercase_ : Any = attention_dropout lowercase_ : Dict = activation lowercase_ : Tuple = initializer_range lowercase_ : str = qa_dropout lowercase_ : List[str] = seq_classif_dropout super().__init__(**lowercase_ , pad_token_id=lowercase_ ) class __magic_name__ ( _UpperCAmelCase): @property def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): if self.task == "multiple-choice": lowercase_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowercase_ : Union[str, Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
719
'''simple docstring''' import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline _lowercase : Optional[Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False) parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not") parser.add_argument("--steps", default=None, type=int, help="Num inference steps") _lowercase : Dict = parser.parse_args() _lowercase : Dict = "cpu" _lowercase : str = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings" _lowercase : Any = "path-to-your-trained-model" _lowercase : str = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: _lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) _lowercase : Any = pipe.to(device) # to channels last _lowercase : Union[str, Any] = pipe.unet.to(memory_format=torch.channels_last) _lowercase : List[Any] = pipe.vae.to(memory_format=torch.channels_last) _lowercase : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: _lowercase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex _lowercase : int = torch.randn(2, 4, 64, 64) _lowercase : int = torch.rand(1) * 999 _lowercase : Union[str, Any] = torch.randn(2, 77, 768) _lowercase : Optional[Any] = (sample, timestep, encoder_hidden_status) try: _lowercase : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: _lowercase : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) _lowercase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) _lowercase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: _lowercase : int = 
ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute _lowercase : int = 666 _lowercase : Any = torch.Generator(device).manual_seed(seed) _lowercase : int = {"generator": generator} if args.steps is not None: _lowercase : Optional[int] = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): _lowercase : List[Any] = pipe(prompt, **generate_kwargs).images[0] # save image image.save("generated.png")
30
0
'''simple docstring''' import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _lowercase : Optional[List[str]] = None _lowercase : Tuple = "<" if sys.byteorder == "little" else ">" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _lowercase : List[Any] = [ np.dtype("|b1"), np.dtype("|u1"), np.dtype("<u2"), np.dtype(">u2"), np.dtype("<i2"), np.dtype(">i2"), np.dtype("<u4"), np.dtype(">u4"), np.dtype("<i4"), np.dtype(">i4"), np.dtype("<f4"), np.dtype(">f4"), np.dtype("<f8"), np.dtype(">f8"), ] @dataclass class __magic_name__ : UpperCamelCase__ = True UpperCamelCase__ = None # Automatically constructed UpperCamelCase__ = '''PIL.Image.Image''' UpperCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()}) UpperCamelCase__ = field(default='''Image''', init=_UpperCAmelCase, repr=_UpperCAmelCase) def __call__( self : Optional[int] ): return self.pa_type def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if isinstance(lowercase_ , lowercase_ ): lowercase_ : str = np.array(lowercase_ ) if isinstance(lowercase_ , lowercase_ ): return {"path": value, "bytes": None} elif isinstance(lowercase_ , lowercase_ ): return {"path": None, "bytes": value} elif 
isinstance(lowercase_ , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(lowercase_ ) elif isinstance(lowercase_ , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(lowercase_ ) elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("""path""" )} elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )} else: raise ValueError( f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : dict , lowercase_ : Any=None ): if not self.decode: raise RuntimeError("""Decoding is disabled for this feature. 
Please use Image(decode=True) instead.""" ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support decoding images, please install 'Pillow'.""" ) if token_per_repo_id is None: lowercase_ : Any = {} lowercase_ : Optional[Any] = value["""path"""], value["""bytes"""] if bytes_ is None: if path is None: raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) else: if is_local_path(lowercase_ ): lowercase_ : List[str] = PIL.Image.open(lowercase_ ) else: lowercase_ : Any = path.split("""::""" )[-1] try: lowercase_ : Optional[Any] = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["""repo_id"""] lowercase_ : Tuple = token_per_repo_id.get(lowercase_ ) except ValueError: lowercase_ : Tuple = None with xopen(lowercase_ , """rb""" , use_auth_token=lowercase_ ) as f: lowercase_ : Any = BytesIO(f.read() ) lowercase_ : str = PIL.Image.open(bytes_ ) else: lowercase_ : Optional[int] = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): from .features import Value return ( self if self.decode else { "bytes": Value("""binary""" ), "path": Value("""string""" ), } ) def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ): if pa.types.is_string(storage.type ): lowercase_ : Any = pa.array([None] * len(lowercase_ ) , type=pa.binary() ) lowercase_ : Tuple = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): lowercase_ : Union[str, Any] = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : List[Any] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("""bytes""" ) >= 0: lowercase_ : List[str] = storage.field("""bytes""" ) else: 
lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.binary() ) if storage.type.get_field_index("""path""" ) >= 0: lowercase_ : List[str] = storage.field("""path""" ) else: lowercase_ : Tuple = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : int = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): lowercase_ : List[Any] = pa.array( [encode_np_array(np.array(lowercase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) lowercase_ : Dict = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Optional[Any] = pa.StructArray.from_arrays( [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() ) return array_cast(lowercase_ , self.pa_type ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(lowercase_ : Any ): with xopen(lowercase_ , """rb""" ) as f: lowercase_ : Any = f.read() return bytes_ lowercase_ : Dict = pa.array( [ (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) lowercase_ : int = pa.array( [os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , ) lowercase_ : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() ) return array_cast(lowercase_ , self.pa_type ) def lowerCamelCase ( ) -> List[str]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() lowercase_ : Union[str, Any] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return 
_IMAGE_COMPRESSION_FORMATS def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> bytes: lowercase_ : List[str] = BytesIO() if image.format in list_image_compression_formats(): lowercase_ : int = image.format else: lowercase_ : str = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF""" image.save(UpperCAmelCase__ , format=UpperCAmelCase__ ) return buffer.getvalue() def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> dict: if hasattr(UpperCAmelCase__ , """filename""" ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )} def lowerCamelCase ( UpperCAmelCase__ : np.ndarray ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) lowercase_ : Dict = array.dtype lowercase_ : Dict = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER lowercase_ : Any = dtype.kind lowercase_ : Tuple = dtype.itemsize lowercase_ : Any = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: lowercase_ : Dict = np.dtype("""|u1""" ) if dtype_kind not in ["u", "i"]: raise TypeError( F'''Unsupported array dtype {dtype} for image encoding. 
Only {dest_dtype} is supported for multi-channel arrays.''' ) if dtype is not dest_dtype: warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: lowercase_ : List[Any] = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: lowercase_ : int = dtype_byteorder + dtype_kind + str(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = np.dtype(UpperCAmelCase__ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' ) lowercase_ : Optional[Any] = PIL.Image.fromarray(array.astype(UpperCAmelCase__ ) ) return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )} def lowerCamelCase ( UpperCAmelCase__ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if objs: lowercase_ : Union[str, Any] = first_non_null_value(UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(UpperCAmelCase__ , np.ndarray ): lowercase_ : Union[str, Any] = no_op_if_value_is_null(UpperCAmelCase__ ) return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs] elif isinstance(UpperCAmelCase__ , PIL.Image.Image ): lowercase_ : List[str] = no_op_if_value_is_null(UpperCAmelCase__ ) return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs] else: return objs else: return objs
720
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _lowercase : Optional[Any] = { "configuration_swiftformer": [ "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwiftFormerConfig", "SwiftFormerOnnxConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = [ "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "SwiftFormerForImageClassification", "SwiftFormerModel", "SwiftFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys _lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
import random


def lowerCamelCase(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph on ``vertices_number`` nodes as an adjacency-list dict.

    Each possible edge ``(i, j)`` with ``i < j`` is added independently with the
    given ``probability``; for undirected graphs (the default) the edge is
    mirrored into both endpoints' lists.

    :param vertices_number: number of nodes, labelled ``0 .. n-1``
    :param probability: edge probability; ``>= 1`` yields a complete graph and
        ``<= 0`` yields an edgeless graph
    :param directed: if True, only add the ``i -> j`` direction
    :return: dict mapping each node to the list of its neighbours
    """
    # NOTE: the obfuscated original declared all three parameters with the
    # same name (a SyntaxError) and shadowed/lost ``complete_graph``;
    # both are restored here.
    graph: dict = {node: [] for node in range(vertices_number)}

    # A probability of 1 or more deterministically produces every edge.
    if probability >= 1:
        return complete_graph(vertices_number)
    # A probability of 0 or less produces no edges at all.
    if probability <= 0:
        return graph

    # For each unordered node pair, flip a biased coin.
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # Undirected graphs store the edge at both endpoints.
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Return the complete graph on ``vertices_number`` nodes as an adjacency-list dict."""
    return {
        node: [other for other in range(vertices_number) if node != other]
        for node in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
721
'''simple docstring''' import unittest import numpy as np def lowerCamelCase ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray | None = None , ) -> np.ndarray: lowercase_ : List[Any] = np.shape(UpperCAmelCase__ ) lowercase_ : Dict = np.shape(UpperCAmelCase__ ) lowercase_ : int = np.shape(UpperCAmelCase__ ) if shape_a[0] != shape_b[0]: lowercase_ : Optional[int] = ( """Expected the same number of rows for A and B. """ F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(UpperCAmelCase__ ) if shape_b[1] != shape_c[1]: lowercase_ : Optional[Any] = ( """Expected the same number of columns for B and C. """ F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(UpperCAmelCase__ ) lowercase_ : Any = pseudo_inv if a_inv is None: try: lowercase_ : List[str] = np.linalg.inv(UpperCAmelCase__ ) except np.linalg.LinAlgError: raise ValueError( """Input matrix A is not invertible. 
Cannot compute Schur complement.""" ) return mat_c - mat_b.T @ a_inv @ mat_b class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Dict = np.array([[2, 1], [6, 3]] ) lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ ) lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] ) lowercase_ : Optional[int] = np.linalg.det(lowercase_ ) lowercase_ : int = np.linalg.det(lowercase_ ) lowercase_ : int = np.linalg.det(lowercase_ ) self.assertAlmostEqual(lowercase_ , det_a * det_s ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(lowercase_ ): schur_complement(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(lowercase_ ): schur_complement(lowercase_ , lowercase_ , lowercase_ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
30
0
'''simple docstring'''


def lowerCamelCase(UpperCAmelCase__: int) -> int:
    """Return the largest integer obtainable by deleting exactly one digit of ``abs(UpperCAmelCase__)``.

    >>> lowerCamelCase(123)
    23
    >>> lowerCamelCase(-9876)
    987

    :raises TypeError: if the input is not an integer
    """
    if not isinstance(UpperCAmelCase__, int):
        raise TypeError("only integers accepted as input")
    # Work on the digit string of the absolute value; the sign is discarded.
    num_str = str(abs(UpperCAmelCase__))
    # One copy of the digit list per digit position ...
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    # ... then delete a different position from each copy.
    # (The obfuscated original called len()/join() on the *int* argument
    # instead of its digit string, so every call raised TypeError.)
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
700
'''simple docstring'''
# The 64-character alphabet of RFC 4648 base64 (value 0 -> "A", ..., 63 -> "/").
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
# Backward-compatible alias: the obfuscated original bound the charset only to
# this throwaway name while both functions below read the undefined
# ``B64_CHARSET`` (NameError); keeping both names fixes that without breaking
# any existing reference.
_lowercase : str = B64_CHARSET


def lowerCamelCase(UpperCAmelCase__: bytes) -> bytes:
    """Encode ``UpperCAmelCase__`` to base64 and return the ASCII result as bytes.

    :raises TypeError: if the argument is not a bytes-like object
    """
    # Make sure the supplied data is a bytes-like object.
    if not isinstance(UpperCAmelCase__, bytes):
        raise TypeError(
            f"a bytes-like object is required, not '{UpperCAmelCase__.__class__.__name__}'"
        )

    # Concatenate the 8-bit binary representation of every byte.
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in UpperCAmelCase__)

    # Padding is needed whenever the *bit* count is not a multiple of 6.
    # (The obfuscated original measured len(data) instead of the bit stream,
    # which produced wrong padding for most input lengths.)
    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The "=" padding that will be appended to the output.
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append zero bits so the stream length becomes a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding base64 character.
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


# Expose the encoder under a descriptive name *before* the decoder below
# rebinds ``lowerCamelCase`` (the two defs share one name in this module, so
# the second definition shadows the first at module level).
base64_encode = lowerCamelCase


def lowerCamelCase(UpperCAmelCase__) -> bytes:  # noqa: F811 -- deliberate rebind, see alias above
    """Decode base64-encoded ``UpperCAmelCase__`` (str or ASCII bytes) to raw bytes.

    :raises TypeError: if the argument is neither str nor bytes
    :raises ValueError: if byte input is not valid ASCII/UTF-8
    :raises AssertionError: on invalid characters or incorrect padding
    """
    # Make sure encoded_data is either a string or a bytes-like object.
    if not isinstance(UpperCAmelCase__, str) and not isinstance(UpperCAmelCase__, bytes):
        raise TypeError(
            "argument should be a bytes-like object or ASCII string, "
            f"not '{UpperCAmelCase__.__class__.__name__}'"
        )

    # Bytes input must decode cleanly; normalise to str for processing.
    if isinstance(UpperCAmelCase__, bytes):
        try:
            encoded_data = UpperCAmelCase__.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    else:
        encoded_data = UpperCAmelCase__

    padding = encoded_data.count("=")

    # Check that the encoded string contains only base64 characters.
    # NOTE: asserts are kept from the original API (callers see
    # AssertionError); they vanish under ``python -O``.
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding.
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding and drop the 2 stuffing bits each "=" stands for.
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    # Regroup the bit stream into 8-bit bytes.
    return bytes(
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
30
0
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


# Module logger.
_lowercase : Union[str, Any] = logging.get_logger(__name__)

# SentencePiece's word-initial marker character (U+2581).
_lowercase : Union[str, Any] = "▁"

# NOTE(review): the constants below are all bound to the same throwaway name,
# so each assignment shadows the previous one, while the class attributes
# reference VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which are never defined in this
# file.  This looks like mechanical renaming damage (BartPho syllable
# tokenizer) -- confirm against the upstream module before use.
_lowercase : str = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

_lowercase : Tuple = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

_lowercase : Optional[int] = {"vinai/bartpho-syllable": 1024}


class __magic_name__(_UpperCAmelCase):
    # SentencePiece-backed tokenizer: a BPE model plus a reduced monolingual
    # vocabulary file mapping tokens to ids.
    # NOTE(review): every public method below shares the name
    # SCREAMING_SNAKE_CASE_, so later definitions shadow earlier ones in the
    # class namespace, and several defs repeat one parameter name (a
    # SyntaxError as written).  Preserved byte-for-byte; restoring the
    # descriptive names requires the upstream source.

    UpperCamelCase__ = VOCAB_FILES_NAMES
    UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ = ['''input_ids''', '''attention_mask''']

    def __init__(
        self : List[Any],
        lowercase_ : Dict,
        lowercase_ : Optional[int],
        lowercase_ : List[Any]="<s>",
        lowercase_ : Optional[int]="</s>",
        lowercase_ : Optional[int]="</s>",
        lowercase_ : Optional[Any]="<s>",
        lowercase_ : int="<unk>",
        lowercase_ : Optional[int]="<pad>",
        lowercase_ : List[Any]="<mask>",
        lowercase_ : Optional[Dict[str, Any]] = None,
        **lowercase_ : int,
    ):
        # Mask token behaves like a normal word, i.e. includes the space
        # before it.
        lowercase_ : int = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_) if isinstance(lowercase_, lowercase_) else mask_token
        lowercase_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=lowercase_,
            eos_token=lowercase_,
            unk_token=lowercase_,
            sep_token=lowercase_,
            cls_token=lowercase_,
            pad_token=lowercase_,
            mask_token=lowercase_,
            sp_model_kwargs=self.sp_model_kwargs,
            **lowercase_,
        )
        lowercase_ : Tuple = vocab_file
        lowercase_ : Any = monolingual_vocab_file
        # Load the SentencePiece BPE model from the vocab file.
        lowercase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(lowercase_))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        lowercase_ : Dict = {}
        lowercase_ : Optional[int] = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(lowercase_) not in self.fairseq_tokens_to_ids:
                lowercase_ : List[Any] = cnt
                cnt += 1
        # Each line of the monolingual dict contributes its first field as a
        # token; ids continue after the special tokens.
        with open(lowercase_, """r""", encoding="""utf-8""") as f:
            for line in f.readlines():
                lowercase_ : int = line.strip().split()[0]
                lowercase_ : Dict = len(self.fairseq_tokens_to_ids)
        if str(lowercase_) not in self.fairseq_tokens_to_ids:
            lowercase_ : Optional[Any] = len(self.fairseq_tokens_to_ids)

        # Inverse mapping id -> token.
        lowercase_ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self : str):
        # Drop the unpicklable SentencePiece processor; keep its serialized proto.
        lowercase_ : List[str] = self.__dict__.copy()
        lowercase_ : Optional[Any] = None
        lowercase_ : str = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self : Optional[int], lowercase_ : List[str]):
        lowercase_ : int = d

        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            lowercase_ : int = {}

        # Rebuild the SentencePiece processor from the serialized proto.
        lowercase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def SCREAMING_SNAKE_CASE_(self : Any, lowercase_ : List[int], lowercase_ : Optional[List[int]] = None):
        # Presumably build_inputs_with_special_tokens: <s> A </s> (</s> B </s>).
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowercase_ : Optional[int] = [self.cls_token_id]
        lowercase_ : str = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def SCREAMING_SNAKE_CASE_(self : Optional[int], lowercase_ : List[int], lowercase_ : Optional[List[int]] = None, lowercase_ : bool = False):
        # Presumably get_special_tokens_mask: 1 marks special tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase_, token_ids_a=lowercase_, already_has_special_tokens=lowercase_
            )

        if token_ids_a is None:
            return [1] + ([0] * len(lowercase_)) + [1]
        return [1] + ([0] * len(lowercase_)) + [1, 1] + ([0] * len(lowercase_)) + [1]

    def SCREAMING_SNAKE_CASE_(self : Optional[int], lowercase_ : List[int], lowercase_ : Optional[List[int]] = None):
        # Presumably create_token_type_ids_from_sequences: all zeros.
        lowercase_ : Any = [self.sep_token_id]
        lowercase_ : Tuple = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]

    @property
    def SCREAMING_SNAKE_CASE_(self : List[str]):
        # Presumably vocab_size: size of the reduced fairseq vocabulary.
        return len(self.fairseq_ids_to_tokens)

    def SCREAMING_SNAKE_CASE_(self : List[Any]):
        # Presumably get_vocab: token -> id map including added tokens.
        lowercase_ : Dict = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def SCREAMING_SNAKE_CASE_(self : Tuple, lowercase_ : str):
        # Presumably _tokenize via the SentencePiece model.
        return self.sp_model.encode(lowercase_, out_type=lowercase_)

    def SCREAMING_SNAKE_CASE_(self : Optional[int], lowercase_ : List[Any]):
        # Presumably _convert_token_to_id with <unk> fallback.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def SCREAMING_SNAKE_CASE_(self : Optional[int], lowercase_ : str):
        # Presumably _convert_id_to_token.
        return self.fairseq_ids_to_tokens[index]

    def SCREAMING_SNAKE_CASE_(self : List[Any], lowercase_ : Tuple):
        # Presumably convert_tokens_to_string: join pieces and restore spaces.
        lowercase_ : Union[str, Any] = """""".join(lowercase_).replace(lowercase_, """ """).strip()
        return out_string

    def SCREAMING_SNAKE_CASE_(self : List[Any], lowercase_ : str, lowercase_ : Optional[str] = None):
        # Presumably save_vocabulary: copy (or re-serialize) the BPE model and
        # write out the reduced monolingual vocab.
        if not os.path.isdir(lowercase_):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        lowercase_ : Optional[int] = os.path.join(
            lowercase_, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )
        lowercase_ : str = os.path.join(
            lowercase_,
            (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, lowercase_)
        elif not os.path.isfile(self.vocab_file):
            with open(lowercase_, """wb""") as fi:
                lowercase_ : Optional[Any] = self.sp_model.serialized_model_proto()
                fi.write(lowercase_)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            lowercase_
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, lowercase_)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(lowercase_, """w""", encoding="""utf-8""") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'''{str(lowercase_)} \n''')

        return out_vocab_file, out_monolingual_vocab_file
701
'''simple docstring''' import argparse _lowercase : Optional[int] = "docs/source/_static/js/custom.js" def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Dict: with open(UpperCAmelCase__ , encoding="""utf-8""" , newline="""\n""" ) as f: lowercase_ : Optional[int] = f.readlines() lowercase_ : Tuple = 0 # First let's put the right version while not lines[index].startswith("""const stableVersion =""" ): index += 1 lowercase_ : Optional[Any] = F'''const stableVersion = "v{version}"\n''' # Then update the dictionary while not lines[index].startswith("""const versionMapping = {""" ): index += 1 # We go until the end while not lines[index].startswith("""}""" ): index += 1 # We add the new version at the end lines[index - 1] += F''' "v{version}": "v{version}",\n''' with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(UpperCAmelCase__ ) if __name__ == "__main__": _lowercase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--version", help="Release version.") _lowercase : Dict = parser.parse_args() update_custom_js(args.version)
30
0
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals

from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
    ROBERTA_INPUTS_DOCSTRING,
    ROBERTA_START_DOCSTRING,
    RobertaEmbeddings,
)

from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy


# NOTE(review): both classes below are named __magic_name__ (the second
# shadows the first at module level), the decorators/bases reference the
# undefined _UpperCAmelCase, and the second class instantiates the undefined
# DeeRobertaModel -- this is mechanical renaming damage (upstream: DeeBERT's
# RoBERTa variant); tokens preserved byte-for-byte, comments added only.
@add_start_docstrings(
    '''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''',
    _UpperCAmelCase,
)
class __magic_name__(_UpperCAmelCase):
    '''simple docstring'''
    # Config class and base-model prefix used by the pretrained-model machinery.
    UpperCamelCase__ = RobertaConfig
    UpperCamelCase__ = '''roberta'''

    def __init__(self : List[str], lowercase_ : Optional[int]):
        # Swap the default embeddings for RoBERTa embeddings, then
        # (re-)initialize weights.
        super().__init__(lowercase_)
        lowercase_ : int = RobertaEmbeddings(lowercase_)
        self.init_weights()


@add_start_docstrings(
    '''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top, also takes care of multi-layer training. ''',
    _UpperCAmelCase,
)
class __magic_name__(_UpperCAmelCase):
    '''simple docstring'''
    UpperCamelCase__ = RobertaConfig
    UpperCamelCase__ = '''roberta'''

    def __init__(self : List[str], lowercase_ : int):
        # Classification head on top of the early-exit ("highway") encoder:
        # dropout + linear projection to num_labels.
        super().__init__(lowercase_)
        lowercase_ : str = config.num_labels
        lowercase_ : List[str] = config.num_hidden_layers
        lowercase_ : Dict = DeeRobertaModel(lowercase_)
        lowercase_ : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob)
        lowercase_ : Optional[Any] = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(lowercase_)
    def SCREAMING_SNAKE_CASE_(
        self : List[str],
        lowercase_ : Dict=None,
        lowercase_ : int=None,
        lowercase_ : Optional[int]=None,
        lowercase_ : List[str]=None,
        lowercase_ : Tuple=None,
        lowercase_ : int=None,
        lowercase_ : List[str]=None,
        lowercase_ : List[str]=-1,
        lowercase_ : Tuple=False,
    ):
        # Presumably the forward pass: run the encoder; a HighwayException
        # signals that an intermediate ("highway") layer exited early.
        # NOTE(review): the duplicated parameter names above are a
        # SyntaxError as written -- damage from identifier renaming.
        lowercase_ : int = self.num_layers
        try:
            lowercase_ : List[str] = self.roberta(
                lowercase_,
                attention_mask=lowercase_,
                token_type_ids=lowercase_,
                position_ids=lowercase_,
                head_mask=lowercase_,
                inputs_embeds=lowercase_,
            )

            # Pooled output -> dropout -> classifier logits.
            lowercase_ : Optional[Any] = outputs[1]
            lowercase_ : List[str] = self.dropout(lowercase_)
            lowercase_ : str = self.classifier(lowercase_)
            lowercase_ : Optional[int] = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An intermediate layer exited: recover its logits and exit index.
            lowercase_ : Tuple = e.message
            lowercase_ : int = e.exit_layer
            lowercase_ : Dict = outputs[0]

        if not self.training:
            # At inference, track the entropy of the final logits and collect
            # per-highway logits/entropies.
            lowercase_ : Optional[Any] = entropy(lowercase_)
            lowercase_ : str = []
            lowercase_ : List[str] = []

        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                lowercase_ : Optional[int] = MSELoss()
                lowercase_ : Optional[Any] = loss_fct(logits.view(-1), labels.view(-1))
            else:
                lowercase_ : Optional[int] = CrossEntropyLoss()
                lowercase_ : Optional[int] = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            lowercase_ : int = []
            for highway_exit in outputs[-1]:
                lowercase_ : Dict = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(lowercase_)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    lowercase_ : List[Any] = MSELoss()
                    lowercase_ : Tuple = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    lowercase_ : Optional[int] = CrossEntropyLoss()
                    lowercase_ : Any = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(lowercase_)

            if train_highway:
                # Train only the highway heads (excluding the final one).
                lowercase_ : Any = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                lowercase_ : Dict = (loss,) + outputs

        if not self.training:
            # Append (entropies, exit layer) diagnostics; optionally replace
            # the final logits with a chosen highway layer's logits.
            lowercase_ : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                lowercase_ : Union[str, Any] = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
702
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __magic_name__ : def __init__( self : Tuple , lowercase_ : Tuple , lowercase_ : Any=3 , lowercase_ : int=32 , lowercase_ : str=3 , lowercase_ : int=10 , lowercase_ : Any=[8, 16, 32, 64] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Any=True , lowercase_ : int=True , lowercase_ : Any="relu" , lowercase_ : List[Any]=3 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : List[str]=1 , ): lowercase_ : Any = parent lowercase_ : str = batch_size lowercase_ : Any = image_size lowercase_ : Optional[Any] = num_channels lowercase_ : Any = embeddings_size lowercase_ : Union[str, Any] = hidden_sizes lowercase_ : Any = depths lowercase_ : Dict = is_training lowercase_ : Tuple = use_labels lowercase_ : str = hidden_act lowercase_ : Optional[Any] = num_labels lowercase_ : Tuple = scope lowercase_ : Any = len(lowercase_ ) lowercase_ : Optional[Any] = out_features lowercase_ : Tuple = out_indices lowercase_ : str = num_groups def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ : List[Any] = None 
if self.use_labels: lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels ) lowercase_ : int = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] ): lowercase_ : Optional[int] = BitModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : List[Any] = model(lowercase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] ): lowercase_ : Union[str, Any] = self.num_labels lowercase_ : Tuple = BitForImageClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Any = model(lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] ): lowercase_ : Any = BitBackbone(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Dict = model(lowercase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone 
works with out_features=None lowercase_ : List[str] = None lowercase_ : Dict = BitBackbone(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Tuple = model(lowercase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Optional[int] = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs lowercase_ : Any = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () UpperCamelCase__ = ( {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification} if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : int = BitModelTester(self ) lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE_ ( self : List[str] ): return @unittest.skip(reason="""Bit does not output attentions""" ) def 
SCREAMING_SNAKE_CASE_ ( self : Any ): pass @unittest.skip(reason="""Bit does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE_ ( self : Any ): pass @unittest.skip(reason="""Bit does not support input and output embeddings""" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): pass def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Optional[Any] = model_class(lowercase_ ) lowercase_ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : Union[str, Any] = [*signature.parameters.keys()] lowercase_ : Optional[int] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : List[Any] = model_class(config=lowercase_ ) for name, module in model.named_modules(): if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ): lowercase_ : Optional[Any] = model_class(lowercase_ ) model.to(lowercase_ ) 
model.eval() with torch.no_grad(): lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase_ : Optional[int] = self.model_tester.num_stages self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : Dict = ["""preactivation""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: lowercase_ : Union[str, Any] = layer_type lowercase_ : Optional[Any] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : Union[str, Any] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) @unittest.skip(reason="""Bit does not use feedforward chunking""" ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): pass def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def lowerCamelCase ( ) -> Optional[Any]: lowercase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): return ( 
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ ) lowercase_ : int = self.default_image_processor lowercase_ : List[Any] = prepare_img() lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ ) # forward pass with torch.no_grad(): lowercase_ : str = model(**lowercase_ ) # verify the logits lowercase_ : Optional[int] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowercase_ ) lowercase_ : Union[str, Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) ) @require_torch class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = (BitBackbone,) if is_torch_available() else () UpperCamelCase__ = BitConfig UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Union[str, Any] = BitModelTester(self )
30
0
from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge _lowercase : Optional[Any] = [ "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the" " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe" " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.", "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal" " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's" " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the" " body.", "Amnesty International releases its annual report on the death penalty. The report catalogs the use of" " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the" " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital" " punishment.", ] _lowercase : List[Any] = [ "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ." " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz" " had informed his Lufthansa training school of an episode of severe depression, airline says .", "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ." " Israel and the United States opposed the move, which could open the door to war crimes investigations against" " Israelis .", "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to" " death . 
Organization claims that governments around the world are using the threat of terrorism to advance" " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death" " sentences up by 28% .", ] def lowerCamelCase ( ) -> List[str]: lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] ) assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] ) assert ( pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean() ) def lowerCamelCase ( ) -> Optional[Any]: lowercase_ : Tuple = """rougeLsum""" lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k] lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k] assert score > score_no_sep def lowerCamelCase ( ) -> List[Any]: lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""] lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ ) lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ ) assert score_sep == score_no_sep def lowerCamelCase ( ) -> Optional[Any]: lowercase_ : Union[str, Any] = [ """Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""", """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""", ] lowercase_ : List[str] = [ """Margot Frank, died in 1945, a month earlier than previously thought.""", """Prosecutor: \"No videos were used in the crash 
investigation\" German papers say they saw a cell phone video of""" """ the final seconds on board Flight 9525.""", ] assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) def lowerCamelCase ( ) -> Union[str, Any]: lowercase_ : Optional[Any] = [ """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """ ] lowercase_ : List[Any] = [ """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .""" ] lowercase_ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""] lowercase_ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""] assert new_score > prev_score def lowerCamelCase ( ) -> Tuple: lowercase_ : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" ) lowercase_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) ) assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : Union[str, Any] = calculate_rouge_path( data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase__ ) assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
703
'''simple docstring''' from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge _lowercase : Optional[Any] = [ "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the" " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe" " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.", "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal" " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's" " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the" " body.", "Amnesty International releases its annual report on the death penalty. The report catalogs the use of" " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the" " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital" " punishment.", ] _lowercase : List[Any] = [ "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ." " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz" " had informed his Lufthansa training school of an episode of severe depression, airline says .", "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ." " Israel and the United States opposed the move, which could open the door to war crimes investigations against" " Israelis .", "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to" " death . 
Organization claims that governments around the world are using the threat of terrorism to advance" " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death" " sentences up by 28% .", ] def lowerCamelCase ( ) -> List[str]: lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] ) assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] ) assert ( pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean() ) def lowerCamelCase ( ) -> Optional[Any]: lowercase_ : Tuple = """rougeLsum""" lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k] lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k] assert score > score_no_sep def lowerCamelCase ( ) -> List[Any]: lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""] lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ ) lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ ) assert score_sep == score_no_sep def lowerCamelCase ( ) -> Optional[Any]: lowercase_ : Union[str, Any] = [ """Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""", """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""", ] lowercase_ : List[str] = [ """Margot Frank, died in 1945, a month earlier than previously thought.""", """Prosecutor: \"No videos were used in the crash 
investigation\" German papers say they saw a cell phone video of""" """ the final seconds on board Flight 9525.""", ] assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) def lowerCamelCase ( ) -> Union[str, Any]: lowercase_ : Optional[Any] = [ """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """ ] lowercase_ : List[Any] = [ """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .""" ] lowercase_ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""] lowercase_ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""] assert new_score > prev_score def lowerCamelCase ( ) -> Tuple: lowercase_ : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" ) lowercase_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) ) assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : Union[str, Any] = calculate_rouge_path( data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase__ ) assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
30
0
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class __magic_name__ ( unittest.TestCase): def __init__( self : Any , lowercase_ : Optional[Any] , lowercase_ : int=7 , lowercase_ : Any=3 , lowercase_ : Tuple=18 , lowercase_ : List[Any]=30 , lowercase_ : Union[str, Any]=400 , lowercase_ : Tuple=True , lowercase_ : int=None , lowercase_ : List[Any]=True , ): lowercase_ : List[str] = size if size is not None else {"""height""": 18, """width""": 18} lowercase_ : Any = parent lowercase_ : Union[str, Any] = batch_size lowercase_ : Tuple = num_channels lowercase_ : int = image_size lowercase_ : Optional[Any] = min_resolution lowercase_ : int = max_resolution lowercase_ : List[Any] = do_resize lowercase_ : Tuple = size lowercase_ : Union[str, Any] = do_normalize def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = ImageGPTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : List[str] = ImageGPTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self : str ): return self.image_processor_tester.prepare_image_processor_dict() def 
SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase_ , """clusters""" ) ) self.assertTrue(hasattr(lowercase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowercase_ , """size""" ) ) self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) lowercase_ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) lowercase_ : str = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , obj[key] ) ) else: self.assertEqual(obj[key] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase_ : Optional[Any] = os.path.join(lowercase_ , """image_processor.json""" ) image_processor_first.to_json_file(lowercase_ ) lowercase_ : Optional[int] = self.image_processing_class.from_json_file(lowercase_ ).to_dict() lowercase_ : List[str] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: 
image_processor_first.save_pretrained(lowercase_ ) lowercase_ : Union[str, Any] = self.image_processing_class.from_pretrained(lowercase_ ).to_dict() lowercase_ : Any = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowercase_ ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): pass def lowerCamelCase ( ) -> int: lowercase_ : int = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" ) lowercase_ : Union[str, Any] = Image.open(dataset[4]["""file"""] ) lowercase_ : Optional[Any] = Image.open(dataset[5]["""file"""] ) lowercase_ : Optional[Any] = [imagea, imagea] return images @require_vision @require_torch class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Union[str, Any] = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) lowercase_ : Tuple = prepare_images() # test non-batched lowercase_ : Optional[Any] = image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1024) ) lowercase_ : Union[str, Any] = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase_ ) # test batched lowercase_ : int = image_processing(lowercase_ , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1024) ) lowercase_ : Dict = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase_ )
704
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase : Optional[Any] = logging.get_logger(__name__) _lowercase : Union[str, Any] = { "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''speech_to_text''' UpperCamelCase__ = ['''past_key_values'''] UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : str , lowercase_ : Optional[int]=10000 , lowercase_ : int=12 , lowercase_ : Any=2048 , lowercase_ : Any=4 , lowercase_ : Dict=6 , lowercase_ : Any=2048 , lowercase_ : List[str]=4 , lowercase_ : str=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=True , lowercase_ : int="relu" , lowercase_ : str=256 , lowercase_ : int=0.1 , lowercase_ : int=0.0 , lowercase_ : str=0.0 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=1 , lowercase_ : Dict=0 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=6000 , lowercase_ : Tuple=1024 , lowercase_ : str=2 , lowercase_ : Any=(5, 5) , lowercase_ : Union[str, Any]=1024 , lowercase_ : Dict=80 , lowercase_ : List[Any]=1 , **lowercase_ : int , ): lowercase_ : List[Any] = vocab_size lowercase_ : str = d_model lowercase_ : List[Any] = encoder_ffn_dim lowercase_ : str = encoder_layers lowercase_ : Dict = encoder_attention_heads lowercase_ : str = decoder_ffn_dim lowercase_ : int = decoder_layers lowercase_ : Any = decoder_attention_heads lowercase_ : Any = dropout lowercase_ : Dict = attention_dropout lowercase_ : Optional[int] = activation_dropout lowercase_ : Any = activation_function lowercase_ : Union[str, Any] = init_std lowercase_ : str = encoder_layerdrop lowercase_ : 
Optional[int] = decoder_layerdrop lowercase_ : Dict = use_cache lowercase_ : Union[str, Any] = encoder_layers lowercase_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True lowercase_ : Dict = max_source_positions lowercase_ : Optional[int] = max_target_positions lowercase_ : Tuple = num_conv_layers lowercase_ : Tuple = list(lowercase_ ) lowercase_ : Union[str, Any] = conv_channels lowercase_ : str = input_feat_per_channel lowercase_ : str = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( """Configuration for convolutional module is incorrect. """ """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """ f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ''' f'''`config.num_conv_layers = {self.num_conv_layers}`.''' ) super().__init__( pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
30
0
'''simple docstring''' from __future__ import annotations _lowercase : str = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0] _lowercase : List[Any] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1] def lowerCamelCase ( UpperCAmelCase__ : list[float] ) -> list[float]: lowercase_ : Dict = [] lowercase_ : str = len(UpperCAmelCase__ ) for i in range(UpperCAmelCase__ ): lowercase_ : float = -1 for j in range(i + 1 , UpperCAmelCase__ ): if arr[i] < arr[j]: lowercase_ : int = arr[j] break result.append(UpperCAmelCase__ ) return result def lowerCamelCase ( UpperCAmelCase__ : list[float] ) -> list[float]: lowercase_ : str = [] for i, outer in enumerate(UpperCAmelCase__ ): lowercase_ : float = -1 for inner in arr[i + 1 :]: if outer < inner: lowercase_ : Any = inner break result.append(UpperCAmelCase__ ) return result def lowerCamelCase ( UpperCAmelCase__ : list[float] ) -> list[float]: lowercase_ : str = len(UpperCAmelCase__ ) lowercase_ : list[float] = [] lowercase_ : list[float] = [-1] * arr_size for index in reversed(range(UpperCAmelCase__ ) ): if stack: while stack[-1] <= arr[index]: stack.pop() if not stack: break if stack: lowercase_ : Union[str, Any] = stack[-1] stack.append(arr[index] ) return result if __name__ == "__main__": from doctest import testmod from timeit import timeit testmod() print(next_greatest_element_slow(arr)) print(next_greatest_element_fast(arr)) print(next_greatest_element(arr)) _lowercase : List[Any] = ( "from __main__ import arr, next_greatest_element_slow, " "next_greatest_element_fast, next_greatest_element" ) print( "next_greatest_element_slow():", timeit("next_greatest_element_slow(arr)", setup=setup), ) print( "next_greatest_element_fast():", timeit("next_greatest_element_fast(arr)", setup=setup), ) print( " next_greatest_element():", timeit("next_greatest_element(arr)", setup=setup), )
705
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __magic_name__ : def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=13 , lowercase_ : List[str]=10 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=2 , lowercase_ : Optional[Any]=2 , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : str=4 , lowercase_ : Dict=37 , lowercase_ : Tuple="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=10 , lowercase_ : Tuple=0.02 , lowercase_ : Any="divided_space_time" , lowercase_ : Tuple=None , ): lowercase_ : int = parent lowercase_ : str = batch_size lowercase_ : List[str] = image_size lowercase_ : str = num_channels lowercase_ : List[Any] = patch_size lowercase_ : Optional[Any] = num_frames lowercase_ : Dict = is_training lowercase_ : int = use_labels lowercase_ : List[str] = hidden_size lowercase_ : Dict = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Any = intermediate_size lowercase_ : Optional[int] = hidden_act lowercase_ 
: Optional[Any] = hidden_dropout_prob lowercase_ : List[Any] = attention_probs_dropout_prob lowercase_ : Any = attention_type lowercase_ : Union[str, Any] = initializer_range lowercase_ : List[str] = scope lowercase_ : Optional[int] = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token lowercase_ : Dict = (image_size // patch_size) ** 2 lowercase_ : List[Any] = (num_frames) * self.num_patches_per_frame + 1 def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Optional[Any] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) lowercase_ : int = None if self.use_labels: lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels ) lowercase_ : Optional[Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : int = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) lowercase_ : Any = self.num_labels return config def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[str] ): lowercase_ : Optional[Any] = TimesformerModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : int = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str ): 
lowercase_ : Dict = TimesformerForVideoClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : int = model(lowercase_ ) # verify the logits shape lowercase_ : List[Any] = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : List[str] = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ : int = config_and_inputs lowercase_ : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () UpperCamelCase__ = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = TimesformerModelTester(self ) lowercase_ : Union[str, Any] = ConfigTester( self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Tuple=False ): lowercase_ : List[Any] = copy.deepcopy(lowercase_ ) if return_labels: if model_class in get_values(lowercase_ ): lowercase_ : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_ ) return inputs_dict def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): pass def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: 
lowercase_ : str = model_class(lowercase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase_ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Dict = model_class(lowercase_ ) lowercase_ : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : Union[str, Any] = [*signature.parameters.keys()] lowercase_ : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Any ): for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : Any = TimesformerModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): if not self.has_attentions: pass else: lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : List[str] = True for model_class in self.all_model_classes: lowercase_ : str = self.model_tester.seq_length lowercase_ : int = self.model_tester.num_frames lowercase_ : int = True lowercase_ : Any = False lowercase_ : str = True lowercase_ : int = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : List[str] = outputs.attentions self.assertEqual(len(lowercase_ ) , 
self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase_ : List[str] = True lowercase_ : str = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : int = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) lowercase_ : Optional[Any] = len(lowercase_ ) # Check attention is always last and order is fine lowercase_ : Tuple = True lowercase_ : Dict = True lowercase_ : str = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) self.assertEqual(out_len + 1 , len(lowercase_ ) ) lowercase_ : Optional[Any] = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict ): lowercase_ : List[str] = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : Dict = outputs.hidden_states lowercase_ : List[Any] = self.model_tester.num_hidden_layers + 1 
self.assertEqual(len(lowercase_ ) , lowercase_ ) lowercase_ : List[Any] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : List[str] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : Optional[int] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) def lowerCamelCase ( ) -> Optional[int]: lowercase_ : List[str] = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) lowercase_ : List[Any] = np.load(UpperCAmelCase__ ) return list(UpperCAmelCase__ ) @require_torch @require_vision class __magic_name__ ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE_ ( self : str ): # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to( lowercase_ ) lowercase_ : Optional[Any] = self.default_image_processor lowercase_ : Any = prepare_video() lowercase_ : Optional[int] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase_ ) # forward pass with torch.no_grad(): lowercase_ : Optional[Any] = model(**lowercase_ ) # verify the logits lowercase_ : Any = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , lowercase_ ) lowercase_ : int = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
30
0
'''simple docstring'''
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


# NOTE(review): this file is mechanically obfuscated — every local is assigned to
# the single name `lowercase_` but later read under its original name (`config`,
# `scheduler`, `sample`, `output_a`, ...), so several methods would raise
# NameError as written. Comments below describe the evident intent; the code
# tokens are left exactly as found.
class __magic_name__(_UpperCAmelCase):
    # Test suite for CMStochasticIterativeScheduler (consistency-model sampler).
    # scheduler_classes tuple and default number of inference steps.
    UpperCamelCase__ = (CMStochasticIterativeScheduler,)
    UpperCamelCase__ = 10

    def SCREAMING_SNAKE_CASE_(self: Optional[Any], **lowercase_: List[str]):
        # Build the baseline scheduler config and merge caller overrides.
        lowercase_ : List[str] = {
            """num_train_timesteps""": 201,
            """sigma_min""": 0.0_02,
            """sigma_max""": 80.0,
        }
        # NOTE(review): `config` is never bound (mangled assignment above).
        config.update(**lowercase_)
        return config

    def SCREAMING_SNAKE_CASE_(self: Optional[int]):
        # Stepping twice from consecutive timesteps must preserve sample shape.
        lowercase_ : Optional[int] = 10
        lowercase_ : Any = self.get_scheduler_config()
        lowercase_ : Dict = self.scheduler_classes[0](**lowercase_)
        scheduler.set_timesteps(lowercase_)
        lowercase_ : Dict = scheduler.timesteps[0]
        lowercase_ : Optional[int] = scheduler.timesteps[1]
        lowercase_ : Optional[Any] = self.dummy_sample
        lowercase_ : Any = 0.1 * sample
        lowercase_ : List[Any] = scheduler.step(lowercase_, lowercase_, lowercase_).prev_sample
        lowercase_ : Optional[int] = scheduler.step(lowercase_, lowercase_, lowercase_).prev_sample
        self.assertEqual(output_a.shape, sample.shape)
        self.assertEqual(output_a.shape, output_a.shape)

    def SCREAMING_SNAKE_CASE_(self: Any):
        # Config sweep over training-timestep counts.
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=lowercase_)

    def SCREAMING_SNAKE_CASE_(self: Union[str, Any]):
        # Config sweep over the clip_denoised flag.
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=lowercase_)

    def SCREAMING_SNAKE_CASE_(self: Any):
        # Full single-step denoising loop; checks sum/mean of the final sample
        # against recorded reference values.
        lowercase_ : List[Any] = self.scheduler_classes[0]
        lowercase_ : Any = self.get_scheduler_config()
        lowercase_ : Dict = scheduler_class(**lowercase_)
        lowercase_ : Optional[int] = 1
        scheduler.set_timesteps(lowercase_)
        lowercase_ : Any = scheduler.timesteps
        lowercase_ : Optional[Any] = torch.manual_seed(0)
        lowercase_ : Optional[int] = self.dummy_model()
        lowercase_ : int = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(lowercase_):
            # 1. scale model input
            lowercase_ : int = scheduler.scale_model_input(lowercase_, lowercase_)
            # 2. predict noise residual
            lowercase_ : Optional[int] = model(lowercase_, lowercase_)
            # 3. predict previous sample x_t-1
            lowercase_ : List[Any] = scheduler.step(lowercase_, lowercase_, lowercase_, generator=lowercase_).prev_sample
            lowercase_ : str = pred_prev_sample
        lowercase_ : List[Any] = torch.sum(torch.abs(lowercase_))
        lowercase_ : List[Any] = torch.mean(torch.abs(lowercase_))
        assert abs(result_sum.item() - 192.7614) < 1E-2
        assert abs(result_mean.item() - 0.25_10) < 1E-3

    def SCREAMING_SNAKE_CASE_(self: Tuple):
        # Multistep loop with explicit custom timesteps [106, 0]; checks
        # sum/mean of the final sample against recorded reference values.
        lowercase_ : int = self.scheduler_classes[0]
        lowercase_ : List[Any] = self.get_scheduler_config()
        lowercase_ : List[str] = scheduler_class(**lowercase_)
        lowercase_ : List[Any] = [106, 0]
        scheduler.set_timesteps(timesteps=lowercase_)
        lowercase_ : str = scheduler.timesteps
        lowercase_ : Any = torch.manual_seed(0)
        lowercase_ : List[Any] = self.dummy_model()
        lowercase_ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            lowercase_ : Any = scheduler.scale_model_input(lowercase_, lowercase_)
            # 2. predict noise residual
            lowercase_ : List[Any] = model(lowercase_, lowercase_)
            # 3. predict previous sample x_t-1
            lowercase_ : Optional[Any] = scheduler.step(lowercase_, lowercase_, lowercase_, generator=lowercase_).prev_sample
            lowercase_ : Dict = pred_prev_sample
        lowercase_ : List[str] = torch.sum(torch.abs(lowercase_))
        lowercase_ : str = torch.mean(torch.abs(lowercase_))
        assert abs(result_sum.item() - 347.6357) < 1E-2
        assert abs(result_mean.item() - 0.45_27) < 1E-3

    def SCREAMING_SNAKE_CASE_(self: Dict):
        # Non-descending custom timesteps must be rejected.
        lowercase_ : Union[str, Any] = self.scheduler_classes[0]
        lowercase_ : List[Any] = self.get_scheduler_config()
        lowercase_ : Optional[Any] = scheduler_class(**lowercase_)
        lowercase_ : List[Any] = [39, 30, 12, 15, 0]
        with self.assertRaises(lowercase_, msg="""`timesteps` must be in descending order."""):
            scheduler.set_timesteps(timesteps=lowercase_)

    def SCREAMING_SNAKE_CASE_(self: Tuple):
        # Passing both num_inference_steps and explicit timesteps must fail.
        lowercase_ : List[Any] = self.scheduler_classes[0]
        lowercase_ : Optional[Any] = self.get_scheduler_config()
        lowercase_ : Optional[Any] = scheduler_class(**lowercase_)
        lowercase_ : Union[str, Any] = [39, 30, 12, 1, 0]
        lowercase_ : Optional[Any] = len(lowercase_)
        with self.assertRaises(lowercase_, msg="""Can only pass one of `num_inference_steps` or `timesteps`."""):
            scheduler.set_timesteps(num_inference_steps=lowercase_, timesteps=lowercase_)

    def SCREAMING_SNAKE_CASE_(self: int):
        # Timesteps at/above num_train_timesteps must be rejected.
        # NOTE(review): the msg below contains an f-string placeholder but has no
        # f-prefix and a doubled closing brace — it prints literally; confirm intent.
        lowercase_ : List[str] = self.scheduler_classes[0]
        lowercase_ : str = self.get_scheduler_config()
        lowercase_ : Optional[int] = scheduler_class(**lowercase_)
        lowercase_ : Dict = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            lowercase_,
            msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""",
        ):
            scheduler.set_timesteps(timesteps=lowercase_)
706
'''simple docstring'''
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig

# NOTE(review): this module is mechanically obfuscated — every class is named
# `__magic_name__` (later definitions shadow earlier ones), all locals/params are
# bound to `lowercase_` but read under their original names, and several
# signatures repeat the same parameter name (a SyntaxError as written). Comments
# document the evident RegNet structure; code tokens are left exactly as found.

_lowercase : Tuple = logging.get_logger(__name__)

# General docstring
_lowercase : List[str] = "RegNetConfig"

# Base docstring
_lowercase : Dict = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = [1, 1088, 7, 7]

# Image classification docstring
_lowercase : Optional[Any] = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = "tabby, tabby cat"

_lowercase : str = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class __magic_name__(nn.Module):
    # RegNetConvLayer: conv + batch-norm + optional activation.
    def __init__(self: Union[str, Any], lowercase_: int, lowercase_: int, lowercase_: int = 3, lowercase_: int = 1, lowercase_: int = 1, lowercase_: Optional[str] = "relu"):
        super().__init__()
        # "same" padding via kernel_size // 2; bias disabled (BN follows).
        lowercase_ : List[Any] = nn.Convad(
            lowercase_, lowercase_, kernel_size=lowercase_, stride=lowercase_, padding=kernel_size // 2, groups=lowercase_, bias=lowercase_,
        )
        lowercase_ : str = nn.BatchNormad(lowercase_)
        lowercase_ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()

    def SCREAMING_SNAKE_CASE_(self: List[Any], lowercase_: List[str]):
        # conv -> norm -> activation
        lowercase_ : Dict = self.convolution(lowercase_)
        lowercase_ : str = self.normalization(lowercase_)
        lowercase_ : Optional[Any] = self.activation(lowercase_)
        return hidden_state


class __magic_name__(nn.Module):
    # RegNetEmbeddings: stem conv that maps pixel values into the embedding size.
    def __init__(self: List[Any], lowercase_: RegNetConfig):
        super().__init__()
        lowercase_ : str = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        lowercase_ : Any = config.num_channels

    def SCREAMING_SNAKE_CASE_(self: Dict, lowercase_: Optional[Any]):
        # Validate the channel dimension before embedding.
        lowercase_ : List[str] = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                """Make sure that the channel dimension of the pixel values match with the one set in the configuration."""
            )
        lowercase_ : Any = self.embedder(lowercase_)
        return hidden_state


class __magic_name__(nn.Module):
    # RegNetShortCut: 1x1 strided conv + BN used to match residual dimensions.
    def __init__(self: Optional[int], lowercase_: int, lowercase_: int, lowercase_: int = 2):
        super().__init__()
        lowercase_ : Optional[Any] = nn.Convad(lowercase_, lowercase_, kernel_size=1, stride=lowercase_, bias=lowercase_)
        lowercase_ : Union[str, Any] = nn.BatchNormad(lowercase_)

    def SCREAMING_SNAKE_CASE_(self: Tuple, lowercase_: Tensor):
        lowercase_ : Tuple = self.convolution(lowercase_)
        lowercase_ : str = self.normalization(lowercase_)
        return hidden_state


class __magic_name__(nn.Module):
    # RegNetSELayer: squeeze-and-excitation channel attention.
    def __init__(self: str, lowercase_: int, lowercase_: int):
        super().__init__()
        lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1))
        lowercase_ : int = nn.Sequential(
            nn.Convad(lowercase_, lowercase_, kernel_size=1),
            nn.ReLU(),
            nn.Convad(lowercase_, lowercase_, kernel_size=1),
            nn.Sigmoid(),
        )

    def SCREAMING_SNAKE_CASE_(self: Optional[int], lowercase_: Any):
        # b c h w -> b c 1 1
        lowercase_ : List[str] = self.pooler(lowercase_)
        lowercase_ : Optional[int] = self.attention(lowercase_)
        # Scale each channel by its attention weight.
        lowercase_ : Any = hidden_state * attention
        return hidden_state


class __magic_name__(nn.Module):
    # RegNetXLayer: residual bottleneck block (1x1 -> grouped 3x3 -> 1x1).
    def __init__(self: Optional[int], lowercase_: RegNetConfig, lowercase_: int, lowercase_: int, lowercase_: int = 1):
        super().__init__()
        lowercase_ : List[Any] = in_channels != out_channels or stride != 1
        lowercase_ : Optional[int] = max(1, out_channels // config.groups_width)
        # Shortcut only when shape changes; identity otherwise.
        lowercase_ : Dict = (
            RegNetShortCut(lowercase_, lowercase_, stride=lowercase_) if should_apply_shortcut else nn.Identity()
        )
        lowercase_ : List[Any] = nn.Sequential(
            RegNetConvLayer(lowercase_, lowercase_, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(lowercase_, lowercase_, stride=lowercase_, groups=lowercase_, activation=config.hidden_act),
            RegNetConvLayer(lowercase_, lowercase_, kernel_size=1, activation=lowercase_),
        )
        lowercase_ : int = ACTaFN[config.hidden_act]

    def SCREAMING_SNAKE_CASE_(self: Any, lowercase_: Any):
        # Residual add followed by activation.
        lowercase_ : Any = hidden_state
        lowercase_ : Union[str, Any] = self.layer(lowercase_)
        lowercase_ : Union[str, Any] = self.shortcut(lowercase_)
        hidden_state += residual
        lowercase_ : str = self.activation(lowercase_)
        return hidden_state


class __magic_name__(nn.Module):
    # RegNetYLayer: same as the X layer plus squeeze-and-excitation.
    def __init__(self: Optional[Any], lowercase_: RegNetConfig, lowercase_: int, lowercase_: int, lowercase_: int = 1):
        super().__init__()
        lowercase_ : str = in_channels != out_channels or stride != 1
        lowercase_ : int = max(1, out_channels // config.groups_width)
        lowercase_ : int = (
            RegNetShortCut(lowercase_, lowercase_, stride=lowercase_) if should_apply_shortcut else nn.Identity()
        )
        lowercase_ : Union[str, Any] = nn.Sequential(
            RegNetConvLayer(lowercase_, lowercase_, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(lowercase_, lowercase_, stride=lowercase_, groups=lowercase_, activation=config.hidden_act),
            RegNetSELayer(lowercase_, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(lowercase_, lowercase_, kernel_size=1, activation=lowercase_),
        )
        lowercase_ : Optional[int] = ACTaFN[config.hidden_act]

    def SCREAMING_SNAKE_CASE_(self: List[str], lowercase_: Union[str, Any]):
        lowercase_ : Optional[int] = hidden_state
        lowercase_ : str = self.layer(lowercase_)
        lowercase_ : int = self.shortcut(lowercase_)
        hidden_state += residual
        lowercase_ : Optional[Any] = self.activation(lowercase_)
        return hidden_state


class __magic_name__(nn.Module):
    # RegNetStage: a stack of X or Y layers; the first layer downsamples.
    def __init__(self: str, lowercase_: RegNetConfig, lowercase_: int, lowercase_: int, lowercase_: int = 2, lowercase_: int = 2):
        super().__init__()
        lowercase_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
        lowercase_ : str = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                lowercase_,
                lowercase_,
                lowercase_,
                stride=lowercase_,
            ),
            *[layer(lowercase_, lowercase_, lowercase_) for _ in range(depth - 1)],
        )

    def SCREAMING_SNAKE_CASE_(self: Union[str, Any], lowercase_: List[str]):
        lowercase_ : Tuple = self.layers(lowercase_)
        return hidden_state


class __magic_name__(nn.Module):
    # RegNetEncoder: all stages; optionally collects per-stage hidden states.
    def __init__(self: Dict, lowercase_: RegNetConfig):
        super().__init__()
        lowercase_ : Optional[Any] = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                lowercase_,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        lowercase_ : Optional[Any] = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(lowercase_, config.depths[1:]):
            self.stages.append(RegNetStage(lowercase_, lowercase_, lowercase_, depth=lowercase_))

    def SCREAMING_SNAKE_CASE_(self: str, lowercase_: Tensor, lowercase_: bool = False, lowercase_: bool = True):
        lowercase_ : Tuple = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                lowercase_ : Union[str, Any] = hidden_states + (hidden_state,)
            lowercase_ : Dict = stage_module(lowercase_)
        if output_hidden_states:
            lowercase_ : Optional[Any] = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_, hidden_states=lowercase_)


class __magic_name__(_UpperCAmelCase):
    # RegNetPreTrainedModel: weight-init and shared config plumbing.
    UpperCamelCase__ = RegNetConfig
    UpperCamelCase__ = '''regnet'''
    UpperCamelCase__ = '''pixel_values'''
    UpperCamelCase__ = True

    def SCREAMING_SNAKE_CASE_(self: int, lowercase_: Optional[Any]):
        # Kaiming init for convs; unit-weight/zero-bias for norm layers.
        if isinstance(lowercase_, nn.Convad):
            nn.init.kaiming_normal_(module.weight, mode="""fan_out""", nonlinearity="""relu""")
        elif isinstance(lowercase_, (nn.BatchNormad, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def SCREAMING_SNAKE_CASE_(self: List[Any], lowercase_: Optional[int], lowercase_: Any = False):
        # Toggle gradient checkpointing on encoder modules.
        if isinstance(lowercase_, lowercase_):
            lowercase_ : List[str] = value


_lowercase : Dict = r"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"

_lowercase : Any = r"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"


@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''',
    _UpperCAmelCase,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __magic_name__(_UpperCAmelCase):
    # RegNetModel: embedder -> encoder -> adaptive average pool.
    def __init__(self: Any, lowercase_: Any):
        super().__init__(lowercase_)
        lowercase_ : List[str] = config
        lowercase_ : Union[str, Any] = RegNetEmbeddings(lowercase_)
        lowercase_ : Union[str, Any] = RegNetEncoder(lowercase_)
        lowercase_ : str = nn.AdaptiveAvgPoolad((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(lowercase_)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=lowercase_,
        config_class=_CONFIG_FOR_DOC,
        modality="""vision""",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def SCREAMING_SNAKE_CASE_(self: str, lowercase_: Tensor, lowercase_: Optional[bool] = None, lowercase_: Optional[bool] = None):
        # Resolve optional flags from config defaults, then run the backbone.
        lowercase_ : List[Any] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        lowercase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
        lowercase_ : str = self.embedder(lowercase_)
        lowercase_ : Optional[Any] = self.encoder(
            lowercase_, output_hidden_states=lowercase_, return_dict=lowercase_
        )
        lowercase_ : List[Any] = encoder_outputs[0]
        lowercase_ : str = self.pooler(lowercase_)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=lowercase_,
            pooler_output=lowercase_,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet.
    ''',
    _UpperCAmelCase,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __magic_name__(_UpperCAmelCase):
    # RegNetForImageClassification: backbone + flatten/linear classifier head.
    def __init__(self: Dict, lowercase_: str):
        super().__init__(lowercase_)
        lowercase_ : Any = config.num_labels
        lowercase_ : List[str] = RegNetModel(lowercase_)
        # classification head
        lowercase_ : Any = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(lowercase_)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=lowercase_,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def SCREAMING_SNAKE_CASE_(self: List[str], lowercase_: Optional[torch.FloatTensor] = None, lowercase_: Optional[torch.LongTensor] = None, lowercase_: Optional[bool] = None, lowercase_: Optional[bool] = None):
        # Run the backbone, classify the pooled output, and pick the loss by
        # the (possibly inferred) problem type.
        lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
        lowercase_ : Optional[int] = self.regnet(lowercase_, output_hidden_states=lowercase_, return_dict=lowercase_)
        lowercase_ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
        lowercase_ : List[Any] = self.classifier(lowercase_)
        lowercase_ : Optional[int] = None
        if labels is not None:
            if self.config.problem_type is None:
                # Infer the problem type from label count and dtype.
                if self.num_labels == 1:
                    lowercase_ : Optional[int] = """regression"""
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    lowercase_ : str = """single_label_classification"""
                else:
                    lowercase_ : str = """multi_label_classification"""
            if self.config.problem_type == "regression":
                lowercase_ : str = MSELoss()
                if self.num_labels == 1:
                    lowercase_ : List[Any] = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    lowercase_ : List[str] = loss_fct(lowercase_, lowercase_)
            elif self.config.problem_type == "single_label_classification":
                lowercase_ : Optional[int] = CrossEntropyLoss()
                lowercase_ : Union[str, Any] = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                lowercase_ : Dict = BCEWithLogitsLoss()
                lowercase_ : Tuple = loss_fct(lowercase_, lowercase_)
        if not return_dict:
            lowercase_ : Tuple = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=lowercase_, logits=lowercase_, hidden_states=outputs.hidden_states)
30
0
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Miller-Rabin primality test, deterministic below 3,317,044,064,679,887,385,961,981.

    Uses the published witness bounds: for n below the i-th bound, testing the
    first i primes as witnesses is a *proof* of primality, not just probable.

    Args:
        n: Integer to test.
        allow_probable: If True, n above the deterministic bound is tested
            probabilistically with all 13 witnesses instead of raising; a True
            result then means "probable prime".

    Returns:
        True if n is (probably) prime, False if composite.

    Raises:
        ValueError: If n exceeds the deterministic bound and allow_probable
            is False.

    Fixes over the previous revision: the signature used the same name for both
    parameters (a SyntaxError) and every local was assigned to one reused name
    while being read under its real name (NameError at runtime).
    """
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis: below bounds[i-1], the first i primes
    # are sufficient witnesses (1 marks gaps where no new bound was proven)
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    plist = primes  # fallback: all witnesses (only reachable with allow_probable)
    for idx, bound in enumerate(bounds, 1):
        if n < bound:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    # break up n - 1 into a power of 2 (s) and remaining odd component:
    # solve for d * 2 ** s == n - 1
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                # this witness will not determine compositeness
                pr = True
                break
        if pr:
            continue
        # if pr is False, the above loop never evaluated to true,
        # and n MUST be composite
        return False
    return True


# Backward-compatible alias for the previous (auto-mangled) public name.
lowerCamelCase = miller_rabin


def test_miller_rabin() -> None:
    """Exercise miller_rabin with composites/primes straddling each witness bound."""
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for deterministic test


if __name__ == "__main__":
    test_miller_rabin()
707
'''simple docstring'''
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy import map: submodule name -> exported symbols. The configuration
# submodule has no optional dependencies and is always registered.
# NOTE(review): assignments here were auto-mangled to `_lowercase` but the
# structure is consumed below under the name `_import_structure`.
_lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: skip registering the modeling symbols
    pass
else:
    _lowercase : int = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only; mirrors the lazy map above.
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy import map: submodule name -> exported symbols. Each optional-dependency
# group (sentencepiece, tokenizers, torch, tf) is added only when available.
# NOTE(review): assignments here were auto-mangled to `_lowercase` but the
# structure is consumed below under the name `_import_structure`.
_lowercase : Dict = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : int = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : List[Any] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Union[str, Any] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : int = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only; mirrors the lazy map above.
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    _lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
708
'''simple docstring'''
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    """Check is_small_dataset against every combination of dataset size and
    configured IN_MEMORY_MAX_SIZE.

    Args:
        dataset_size: Candidate dataset size in bytes, or None (unknown size).
        input_in_memory_max_size: Value to patch into
            datasets.config.IN_MEMORY_MAX_SIZE, or "default" to leave it alone.
        monkeypatch: pytest fixture used to patch the config attribute.

    Fixes over the previous revision: all three parameters shared one name
    (a SyntaxError, and pytest could not inject fixtures/params by name), the
    function name was not collectable by pytest, and locals were assigned to a
    single reused name while being read under their real names.
    """
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    # Sanity-check the patching itself before testing the function under test.
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    # A dataset is "small" only when both sizes are known/nonzero and it fits.
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
30
0
# Conversion script: port suno/bark checkpoints (semantic "text", "coarse" and
# "fine" sub-models) into the HF Transformers Bark implementation, validate the
# new model's logits against the original, and save the result.
#
# NOTE(review): this module has been machine-obfuscated — locals were renamed to
# `lowercase_`, parameters to `UpperCAmelCase__`, and functions to `lowerCamelCase`,
# while *reads* (model_type, use_small, key, CACHE_DIR, model_info, checkpoint, ...)
# kept their original names.  As written it is not executable: `def` headers repeat
# the same parameter name (SyntaxError), later `lowerCamelCase` definitions shadow
# earlier ones, `Dict`/`List`/etc. annotations are used without importing typing,
# and `__main__` calls `load_model`, which is never bound.  Comments below describe
# the intended behaviour of the pre-obfuscation code — confirm against upstream
# `convert_suno_to_hf.py` before relying on them.
'''simple docstring'''
import argparse
import os
from pathlib import Path

import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download

from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
    BarkCoarseConfig,
    BarkConfig,
    BarkFineConfig,
    BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
    BarkCoarseGenerationConfig,
    BarkFineGenerationConfig,
    BarkGenerationConfig,
    BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging

logging.set_verbosity_info()

_lowercase : Dict = logging.get_logger(__name__)

# Fixed seed so the old-vs-new logits comparison below is deterministic.
set_seed(770)

# Mapping from suno/bark GPT parameter names to HF Bark layer names.
_lowercase : List[Any] = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

# Hub locations of the original suno/bark checkpoints, per sub-model and size.
_lowercase : Dict = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

_lowercase : Union[str, Any] = os.path.dirname(os.path.abspath(__file__))
_lowercase : int = os.path.join(os.path.expanduser("~"), ".cache")
# NOTE(review): `default_cache_dir` is read here but, after obfuscation, never bound.
_lowercase : List[str] = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")


# Build the local cache path for a (model_type, use_small) checkpoint.
# NOTE(review): duplicate parameter name is an obfuscation artifact (SyntaxError);
# originally (model_type, use_small=False) -> path under CACHE_DIR.
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Any=False ) -> int:
    lowercase_ : Tuple = model_type
    if use_small:
        key += "_small"
    return os.path.join(UpperCAmelCase__ , REMOTE_MODEL_PATHS[key]["""file_name"""] )


# Download a checkpoint file from the Hub into the local cache directory.
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> int:
    os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
    hf_hub_download(repo_id=UpperCAmelCase__ , filename=UpperCAmelCase__ , local_dir=UpperCAmelCase__ )


# Load one suno/bark checkpoint and rebuild it as the corresponding HF model
# (BarkSemanticModel / BarkCoarseModel / BarkFineModel), remapping state-dict keys.
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Union[str, Any]="text" ) -> str:
    # Pick model/config/generation-config classes for the requested sub-model.
    if model_type == "text":
        lowercase_ : List[Any] = BarkSemanticModel
        lowercase_ : str = BarkSemanticConfig
        lowercase_ : Union[str, Any] = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        lowercase_ : Union[str, Any] = BarkCoarseModel
        lowercase_ : List[str] = BarkCoarseConfig
        lowercase_ : int = BarkCoarseGenerationConfig
    elif model_type == "fine":
        lowercase_ : Tuple = BarkFineModel
        lowercase_ : int = BarkFineConfig
        lowercase_ : Optional[int] = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    lowercase_ : List[str] = F'''{model_type}_small''' if use_small else model_type
    lowercase_ : Optional[int] = REMOTE_MODEL_PATHS[model_key]
    # Fetch the original checkpoint if it is not cached yet.
    if not os.path.exists(UpperCAmelCase__ ):
        logger.info(F'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
        _download(model_info["""repo_id"""] , model_info["""file_name"""] )
    lowercase_ : int = torch.load(UpperCAmelCase__ , map_location=UpperCAmelCase__ )
    # this is a hack
    lowercase_ : int = checkpoint["""model_args"""]
    # Older checkpoints store a single "vocab_size"; split it into input/output sizes.
    if "input_vocab_size" not in model_args:
        lowercase_ : Optional[Any] = model_args["""vocab_size"""]
        lowercase_ : Dict = model_args["""vocab_size"""]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    lowercase_ : Dict = model_args.pop("""n_head""" )
    lowercase_ : Optional[int] = model_args.pop("""n_embd""" )
    lowercase_ : Optional[Any] = model_args.pop("""n_layer""" )
    lowercase_ : Union[str, Any] = ConfigClass(**checkpoint["""model_args"""] )
    lowercase_ : List[str] = ModelClass(config=UpperCAmelCase__ )
    lowercase_ : int = GenerationConfigClass()
    lowercase_ : List[Any] = model_generation_config
    lowercase_ : str = checkpoint["""model"""]
    # fixup checkpoint
    lowercase_ : Any = """_orig_mod."""
    for k, v in list(state_dict.items() ):
        if k.startswith(UpperCAmelCase__ ):
            # replace part of the key with corresponding layer name in HF implementation
            lowercase_ : str = k[len(UpperCAmelCase__ ) :]
            for old_layer_name in new_layer_name_dict:
                lowercase_ : str = new_k.replace(UpperCAmelCase__ , new_layer_name_dict[old_layer_name] )
            lowercase_ : Dict = state_dict.pop(UpperCAmelCase__ )
    # Sanity-check the remapped state dict: no unexpected / missing keys besides
    # the attention bias buffers, which HF recomputes.
    lowercase_ : str = set(state_dict.keys() ) - set(model.state_dict().keys() )
    lowercase_ : str = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
    lowercase_ : List[Any] = set(model.state_dict().keys() ) - set(state_dict.keys() )
    lowercase_ : List[str] = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
    if len(UpperCAmelCase__ ) != 0:
        raise ValueError(F'''extra keys found: {extra_keys}''' )
    if len(UpperCAmelCase__ ) != 0:
        raise ValueError(F'''missing keys: {missing_keys}''' )
    model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ )
    lowercase_ : Dict = model.num_parameters(exclude_embeddings=UpperCAmelCase__ )
    lowercase_ : Union[str, Any] = checkpoint["""best_val_loss"""].item()
    logger.info(F'''model loaded: {round(n_params/1e6 , 1 )}M params, {round(UpperCAmelCase__ , 3 )} loss''' )
    model.eval()
    model.to(UpperCAmelCase__ )
    del checkpoint, state_dict
    return model


# Convert one sub-model, compare its logits against the original bark model on a
# random batch (tolerance 1e-3), then save the HF model to the dump folder.
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : Dict="text" ) -> str:
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    lowercase_ : str = """cpu"""  # do conversion on cpu
    lowercase_ : List[str] = _get_ckpt_path(UpperCAmelCase__ , use_small=UpperCAmelCase__ )
    lowercase_ : Union[str, Any] = _load_model(UpperCAmelCase__ , UpperCAmelCase__ , model_type=UpperCAmelCase__ , use_small=UpperCAmelCase__ )
    # load bark initial model
    lowercase_ : Union[str, Any] = _bark_load_model(UpperCAmelCase__ , """cpu""" , model_type=UpperCAmelCase__ , use_small=UpperCAmelCase__ )
    if model_type == "text":
        # The original "text" loader returns a dict wrapping the model.
        lowercase_ : List[Any] = bark_model["""model"""]
    if model.num_parameters(exclude_embeddings=UpperCAmelCase__ ) != bark_model.get_num_params():
        raise ValueError("""initial and new models don't have the same number of parameters""" )
    # check if same output as the bark model
    lowercase_ : str = 5
    lowercase_ : Any = 10
    if model_type in ["text", "coarse"]:
        lowercase_ : List[Any] = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
        lowercase_ : Tuple = bark_model(UpperCAmelCase__ )[0]
        lowercase_ : List[Any] = model(UpperCAmelCase__ )
        # take last logits
        lowercase_ : Dict = output_new_model_total.logits[:, [-1], :]
    else:
        # Fine model: inputs additionally carry a codebook dimension.
        lowercase_ : Dict = 3
        lowercase_ : Dict = 8
        lowercase_ : Optional[Any] = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        lowercase_ : int = model(UpperCAmelCase__ , UpperCAmelCase__ )
        lowercase_ : Tuple = bark_model(UpperCAmelCase__ , UpperCAmelCase__ )
        lowercase_ : Optional[int] = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("""initial and new outputs don't have the same shape""" )
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("""initial and new outputs are not equal""" )
    Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
    model.save_pretrained(UpperCAmelCase__ )


# Assemble a full BarkModel from the three converted sub-models plus the Encodec
# codec, wire up the composite generation config, and save (optionally push) it.
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , ) -> List[str]:
    lowercase_ : Dict = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
    lowercase_ : str = BarkSemanticConfig.from_pretrained(os.path.join(UpperCAmelCase__ , """config.json""" ) )
    lowercase_ : Optional[Any] = BarkCoarseConfig.from_pretrained(os.path.join(UpperCAmelCase__ , """config.json""" ) )
    lowercase_ : List[Any] = BarkFineConfig.from_pretrained(os.path.join(UpperCAmelCase__ , """config.json""" ) )
    lowercase_ : Any = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
    lowercase_ : List[str] = BarkSemanticModel.from_pretrained(UpperCAmelCase__ )
    lowercase_ : str = BarkCoarseModel.from_pretrained(UpperCAmelCase__ )
    lowercase_ : List[Any] = BarkFineModel.from_pretrained(UpperCAmelCase__ )
    lowercase_ : Tuple = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
    lowercase_ : Union[str, Any] = BarkConfig.from_sub_model_configs(
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
    lowercase_ : List[Any] = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    lowercase_ : Union[str, Any] = BarkModel(UpperCAmelCase__ )
    lowercase_ : str = semantic
    lowercase_ : Optional[int] = coarseAcoustic
    lowercase_ : Tuple = fineAcoustic
    lowercase_ : Union[str, Any] = codec
    lowercase_ : Tuple = bark_generation_config
    Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
    bark.save_pretrained(UpperCAmelCase__ , repo_id=UpperCAmelCase__ , push_to_hub=UpperCAmelCase__ )


if __name__ == "__main__":
    _lowercase : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
    _lowercase : int = parser.parse_args()
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
709
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow

if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        """Smoke-check the TF mT5 training loss against a precomputed reference."""
        model = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" )
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""" )

        encoder_input_ids = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
        label_ids = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids

        # Per-token cross-entropy returned by the model; negate its mean to get
        # the score compared against the stored reference value.
        token_losses = model(encoder_input_ids , labels=label_ids ).loss
        score = -tf.math.reduce_mean(token_losses ).numpy()

        expected_score = -21.22_81_68
        self.assertTrue(abs(score - expected_score ) < 2E-4 )
30
0
'''simple docstring'''
from __future__ import annotations

import math

_lowercase : Union[str, Any] = "2020.9.26"
_lowercase : Any = "xcodz-dot, cclaus, dhruvmanila"


# Fixed: the two functions below were both named `lowerCamelCase` (the second
# shadowed the first), their `def` headers repeated one parameter name (a
# SyntaxError), and the `__main__` block called `convert_to_ad` / `rotate`,
# which were never defined.  The definitions now carry the names the module
# actually uses.
def convert_to_ad(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Project a 3D point onto a 2D plane with a simple perspective projection.

    ``projected = (coordinate * distance) / (z + distance) * scale``

    :param x, y, z: coordinates of the 3D point (float or int).
    :param scale: magnification factor applied to the projected point.
    :param distance: distance of the virtual camera from the projection plane.
    :raises TypeError: if any argument is not a float or int.
    """
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a 3D point around one coordinate axis and return the new point.

    :param x, y, z: coordinates of the 3D point (float or int).
    :param axis: ``'x'``, ``'y'`` or ``'z'`` — the axis to rotate about.
    :param angle: rotation angle in degrees.
    :raises TypeError: if ``axis`` is not a str or a coordinate/angle is not numeric.
    :raises ValueError: if ``axis`` is not one of ``'x'``, ``'y'``, ``'z'``.
    """
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    # Deliberately non-standard degree->radian conversion kept from the original
    # implementation (its doctest outputs depend on it): 450 deg wraps to a full turn.
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
    print(f"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""")
710
'''simple docstring'''
from collections.abc import Callable

import numpy as np


# Fixed: the original `def` header repeated the parameter name `UpperCAmelCase__`
# five times (a SyntaxError) while the body read the pre-obfuscation names
# (`xa`, `ya`, `step_size`, `x_end`, `ode_func`), all unbound.  Restored valid,
# descriptive parameters; the public name `lowerCamelCase` is kept.
def lowerCamelCase(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Integrate ``y' = ode_func(x, y)`` with Heun's method (explicit trapezoidal rule).

    Starting from ``y(x0) = y0``, advance in steps of ``step_size`` until ``x_end``.

    :param ode_func: right-hand side ``f(x, y)`` of the ODE.
    :param y0: initial value ``y(x0)``.
    :param x0: initial abscissa.
    :param step_size: positive integration step ``h``.
    :param x_end: final abscissa; the number of steps is ``ceil((x_end - x0) / h)``.
    :return: array of ``n + 1`` approximations ``y[k] ≈ y(x0 + k*h)``.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Predictor: plain Euler step; corrector: average of the slopes at both ends.
        predictor = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, predictor))
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
30
0
'''simple docstring'''
from __future__ import annotations


# Fixed: the original `def` header repeated the parameter name (a SyntaxError),
# the recursive calls targeted `slowsort`, which was never defined (the function
# had been renamed `lowerCamelCase`), and the element swap had lost its
# assignment targets.  Restored under the name the recursion expects.
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start:end + 1]`` in place using the (intentionally
    inefficient, multiply-and-surrender) slowsort algorithm.

    :param sequence: mutable list to sort; modified in place, nothing returned.
    :param start: first index of the range to sort (defaults to 0).
    :param end: last index, inclusive (defaults to ``len(sequence) - 1``).
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    # Recursively sort both halves, then bubble the maximum to `end`.
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


# Backward-compatible alias for the obfuscated public name.
lowerCamelCase = slowsort

if __name__ == "__main__":
    from doctest import testmod

    testmod()
711
'''simple docstring'''
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter

# Second-order (biquad) filter factories following the Audio EQ Cookbook
# (https://webaudio.github.io/Audio-EQ-Cookbook/audio-eq-cookbook.html).
#
# Fixed: all seven factories below were named `lowerCamelCase` (each definition
# shadowed the previous one) and every `def` header repeated the same parameter
# name, which is a SyntaxError.  Restored the canonical names and parameters;
# the coefficient formulas are unchanged from the original bodies.


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-pass biquad at `frequency` Hz for the given sample rate.

    :param q_factor: filter quality; the default 1/sqrt(2) gives a Butterworth response.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # b2 == b0 for the low-pass prototype.
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-pass biquad at `frequency` Hz for the given sample rate."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # b2 == b0 for the high-pass prototype.
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a band-pass biquad centred on `frequency` Hz."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create an all-pass biquad (flat magnitude, phase shift around `frequency`)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # All-pass: the a-coefficients are the b-coefficients reversed.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a peaking-EQ biquad boosting/cutting `gain_db` dB around `frequency`."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a low-shelf biquad applying `gain_db` dB below `frequency`."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Shared shelf terms: (A±1) combined with (A∓1)·cos(w0).
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)

    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a high-shelf biquad applying `gain_db` dB above `frequency`."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)

    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
30
0
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser


# Abstract base class for CLI sub-commands.
# NOTE(review): the base class name `_UpperCAmelCase` is an obfuscation artifact
# and is not defined in this module — the original presumably inherited from the
# imported `ABC`; confirm upstream.  Both abstract methods also share the
# obfuscated name `SCREAMING_SNAKE_CASE_`, so the second definition shadows the
# first in the class namespace.
class __magic_name__ ( _UpperCAmelCase):
    @staticmethod
    @abstractmethod
    def SCREAMING_SNAKE_CASE_ ( lowercase_ : ArgumentParser ):
        # Register this command's subparser/arguments on the given parser;
        # concrete subclasses must override.
        raise NotImplementedError()

    @abstractmethod
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # Execute the command; concrete subclasses must override.
        raise NotImplementedError()
712
# Spark-backed `datasets` builder: materialises a `pyspark.sql.DataFrame` as an
# Arrow/Parquet dataset, writing shards in parallel on Spark workers.
#
# NOTE(review): this module has been machine-obfuscated — classes renamed to
# `__magic_name__` (each shadows the previous), methods to `SCREAMING_SNAKE_CASE_`,
# locals/parameters to `lowercase_`.  Several constructs are not valid Python as
# written (duplicate `lowercase_` parameters in method headers; annotated
# tuple-unpacking targets), bodies read pre-obfuscation names
# (`SparkExamplesIterable`, `SparkConfig`, `df`, ...) that are no longer bound,
# and `shutil.move` is called although `shutil` is never imported — a defect
# present before obfuscation of the import list, TODO confirm against upstream
# `datasets/packaged_modules/spark/spark.py`.  Comments describe intent.
'''simple docstring'''
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int

_lowercase : str = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


# BuilderConfig for Spark-backed datasets (originally `SparkConfig`);
# the single attribute is presumably the optional `features` schema.
@dataclass
class __magic_name__ ( datasets.BuilderConfig):
    UpperCamelCase__ = None


# Build a generator function that yields (key, example-dict) pairs, reading the
# DataFrame partitions one by one in the given order (driver-side `collect`).
def lowerCamelCase ( UpperCAmelCase__ : "pyspark.sql.DataFrame" , UpperCAmelCase__ : List[int] , ) -> str:
    import pyspark

    def generate_fn():
        lowercase_ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
        for partition_id in partition_order:
            # Pull one physical partition at a time to bound driver memory.
            lowercase_ : int = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" )
            lowercase_ : Any = partition_df.collect()
            lowercase_ : Dict = 0
            for row in rows:
                # Keys are "<partition>_<row>" so they are unique and ordered.
                yield F'''{partition_id}_{row_id}''', row.asDict()
                row_id += 1

    return generate_fn


# Examples iterable over a Spark DataFrame (originally `SparkExamplesIterable`);
# supports shuffling and worker sharding by permuting the partition order.
class __magic_name__ ( _BaseExamplesIterable):
    def __init__( self : int , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : Optional[int]=None , ):
        lowercase_ : Dict = df
        lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
        lowercase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order )

    def __iter__( self : List[Any] ):
        yield from self.generate_examples_fn()

    # Return a copy of this iterable with the partition order shuffled.
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.random.Generator ):
        lowercase_ : str = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(lowercase_ )
        return SparkExamplesIterable(self.df , partition_order=lowercase_ )

    # Return a copy restricted to this worker's shard of the partitions.
    # NOTE(review): duplicate `lowercase_` parameters are an obfuscation artifact.
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ):
        lowercase_ : str = self.split_shard_indices_by_worker(lowercase_ , lowercase_ )
        return SparkExamplesIterable(self.df , partition_order=lowercase_ )

    @property
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # Number of shards == number of partitions being read.
        return len(self.partition_order )


# DatasetBuilder backed by a Spark DataFrame (originally `Spark`).
class __magic_name__ ( datasets.DatasetBuilder):
    UpperCamelCase__ = SparkConfig

    def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ):
        import pyspark

        lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
        lowercase_ : Optional[int] = df
        lowercase_ : List[str] = working_dir
        # config_name is derived from the DataFrame's semantic hash so the same
        # query maps to the same cache entry.
        super().__init__(
            cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , )

    # Validate that cache_dir is visible to both driver and workers by writing a
    # probe file from a Spark task and checking it from the driver.
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # Returns the path of the created file.
        def create_cache_and_write_probe(lowercase_ : str ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=lowercase_ )
            lowercase_ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(lowercase_ , """a""" )
            return [probe_file]

        # Single-node local master: the cache is trivially shared.
        if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            lowercase_ : str = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect()
            )
            if os.path.isfile(probe[0] ):
                return
        raise ValueError(
            """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        # DatasetInfo carries only the (optional) user-provided features schema.
        return datasets.DatasetInfo(features=self.config.features )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ):
        # A Spark-backed dataset exposes a single TRAIN split.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]

    # Repartition the DataFrame so each partition's Arrow payload is expected to
    # fit within max_shard_size, estimated from a <=100-row sample.
    def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
        import pyspark

        def get_arrow_batch_size(lowercase_ : Any ):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )

        lowercase_ : Union[str, Any] = self.df.count()
        lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        lowercase_ : Any = (
            self.df.limit(lowercase_ )
            .repartition(1 )
            .mapInArrow(lowercase_ , """batch_bytes: long""" )
            .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) )
            lowercase_ : Any = self.df.repartition(lowercase_ )

    # Write shards on Spark workers via mapInArrow; yields per-task statistics
    # (task_id, (num_examples, num_bytes, num_shards, shard_lengths)).
    def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
        import pyspark

        lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter
        lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath
        lowercase_ : Optional[Any] = file_format == """parquet"""
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        lowercase_ : Tuple = self.config.features
        lowercase_ : Any = self._writer_batch_size
        lowercase_ : List[str] = self._fs.storage_options

        def write_arrow(lowercase_ : str ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId()
            lowercase_ : Dict = next(lowercase_ , lowercase_ )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
            lowercase_ : int = 0
            lowercase_ : List[Any] = writer_class(
                features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
            lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
            writer.write_table(lowercase_ )
            for batch in it:
                # Roll over to a new shard file once the current one reaches max_shard_size.
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    lowercase_ , lowercase_ : Dict = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
                    shard_id += 1
                    lowercase_ : Any = writer_class(
                        features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
                lowercase_ : List[str] = pa.Table.from_batches([batch] )
                writer.write_table(lowercase_ )
            if writer._num_bytes > 0:
                lowercase_ , lowercase_ : str = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
            # Move finished files from the scratch working dir to the output dir.
            # NOTE(review): `shutil` is never imported in this module — this raises
            # NameError whenever a working dir is used; confirm against upstream.
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(lowercase_ ) ):
                    lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) )
                    shutil.move(lowercase_ , lowercase_ )

        # Aggregate per-task shard statistics on the cluster, then collect them.
        lowercase_ : Union[str, Any] = (
            self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" )
            .groupBy("""task_id""" )
            .agg(
                pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,
                pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,
                pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,
                pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    # Drive the split preparation: write all shards, total the statistics, then
    # rename shard files into the final -SSSSS-of-NNNNN naming scheme.
    def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
        self._validate_cache_dir()
        lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(lowercase_ )
        lowercase_ : Tuple = not is_remote_filesystem(self._fs )
        lowercase_ : int = os.path.join if is_local else posixpath.join
        lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
        lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
        lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ )
        lowercase_ : Any = 0
        lowercase_ : Tuple = 0
        lowercase_ : int = 0
        lowercase_ : Dict = []
        lowercase_ : Union[str, Any] = []
        for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ):
            # NOTE(review): annotated tuple-unpacking below is invalid syntax (obfuscation artifact).
            ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Union[str, Any] = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(lowercase_ )
        lowercase_ : List[str] = total_num_examples
        lowercase_ : int = total_num_bytes
        # should rename everything at the end
        logger.debug(f'''Renaming {total_shards} shards.''' )
        if total_shards > 1:
            lowercase_ : Tuple = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            lowercase_ : Dict = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard( lowercase_ : int , lowercase_ : int , lowercase_ : int , ):
                rename(
                    lowercase_ ,
                    fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) ,
                    fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) ,
                )

            # Flatten (task_id, shard_id) into consecutive global shard ids, then
            # rename in parallel on the cluster.
            lowercase_ : Union[str, Any] = []
            lowercase_ : Tuple = 0
            for i in range(len(lowercase_ ) ):
                lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i]
                for shard_id in range(lowercase_ ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect()
        else:
            # don't use any pattern
            lowercase_ : List[str] = 0
            lowercase_ : Optional[Any] = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) ,
                fpath.replace(lowercase_ , """""" ) ,
            )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ):
        # Streaming entry point: iterate the DataFrame partition by partition.
        return SparkExamplesIterable(self.df )
30
0
# Unit tests for the repo utility `utils/check_dummies.py`, which generates the
# dummy-object modules used when an optional backend (torch, tf, ...) is absent.
#
# NOTE(review): obfuscation artifacts — class renamed `__magic_name__`, every
# test method renamed `SCREAMING_SNAKE_CASE_` (so later defs shadow earlier ones
# and only one test would actually run), locals renamed `lowercase_`, and reads
# like `git_repo_path`/`objects`/`dummy_files` left unbound.  The whitespace
# *inside* the expected-value string literals also looks collapsed (indentation
# reduced to single spaces); they are reproduced as found — confirm the expected
# strings against the original test file before trusting them.
'''simple docstring'''
import os
import sys
import unittest

_lowercase : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
_lowercase : Any = os.path.join(git_repo_path, "src", "transformers")

# Templates for generated dummy constants / classes / functions.
_lowercase : Tuple = "\n{0} = None\n"
_lowercase : Optional[Any] = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
_lowercase : Any = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"


class __magic_name__ ( unittest.TestCase):
    # find_backend: extract the backend name(s) from an `if not is_x_available():` line.
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        lowercase_ : Union[str, Any] = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
        self.assertIsNone(lowercase_ )
        lowercase_ : Optional[int] = find_backend(""" if not is_tokenizers_available():""" )
        self.assertEqual(lowercase_ , """tokenizers""" )
        lowercase_ : Optional[Any] = find_backend(""" if not is_tensorflow_text_available():""" )
        self.assertEqual(lowercase_ , """tensorflow_text""" )
        lowercase_ : str = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
        self.assertEqual(lowercase_ , """sentencepiece_and_tokenizers""" )
        lowercase_ : str = find_backend(
            """ if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
        self.assertEqual(lowercase_ , """sentencepiece_and_tensorflow_text""" )
        lowercase_ : Union[str, Any] = find_backend(
            """ if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
        self.assertEqual(lowercase_ , """sentencepiece_and_tokenizers_and_vision""" )

    # read_init: the parsed mapping backend -> object names contains known entries.
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        lowercase_ : Union[str, Any] = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("""torch""" , lowercase_ )
        self.assertIn("""tensorflow_text""" , lowercase_ )
        self.assertIn("""sentencepiece_and_tokenizers""" , lowercase_ )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("""BertModel""" , objects["""torch"""] )
        self.assertIn("""TFBertModel""" , objects["""tf"""] )
        self.assertIn("""FlaxBertModel""" , objects["""flax"""] )
        self.assertIn("""BertModel""" , objects["""torch"""] )
        self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] )
        self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] )

    # create_dummy_object: constants, functions and classes render to the templates above.
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        lowercase_ : Any = create_dummy_object("""CONSTANT""" , """'torch'""" )
        self.assertEqual(lowercase_ , """\nCONSTANT = None\n""" )
        lowercase_ : Optional[Any] = create_dummy_object("""function""" , """'torch'""" )
        self.assertEqual(
            lowercase_ , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
        lowercase_ : Tuple = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') """
        lowercase_ : str = create_dummy_object("""FakeClass""" , """'torch'""" )
        self.assertEqual(lowercase_ , lowercase_ )

    # create_dummy_files: a whole dummy module is generated for the torch backend.
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        lowercase_ : Tuple = """# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) """
        lowercase_ : Optional[int] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
        self.assertEqual(dummy_files["""torch"""] , lowercase_ )
713
# Lazy-loading package init for the Bloom model (transformers-style): declares
# the import structure, gated on optional backends, and defers real imports to
# `_LazyModule` at attribute access time.
#
# NOTE(review): obfuscation renamed every assignment target to `_lowercase`, so
# the structure dict and backend lists are never merged, and the final
# `_LazyModule(...)` call reads `_import_structure`, which is unbound here —
# the original accumulated all entries in `_import_structure`; confirm upstream.
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Base import structure: configuration is always available.
_lowercase : Dict = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

# Tokenizer entries only when the `tokenizers` backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Dict = ["BloomTokenizerFast"]

# Modeling entries only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Dict = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime this branch is skipped.
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    import sys

    # Replace this module object with a lazy proxy that imports submodules on demand.
    _lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
"""Second-order (biquad) IIR filter factories.

Coefficient formulas follow Robert Bristow-Johnson's Audio EQ Cookbook.
``frequency`` is the corner/centre frequency in Hz, ``samplerate`` the
sampling rate in Hz, ``q_factor`` the filter Q (default 1/sqrt(2) gives a
Butterworth response) and ``gain_db`` the shelf/peak gain in decibels.
Each factory returns a configured 2nd-order ``IIRFilter``.
"""
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos  # b2 == b0 for the low-pass response

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos  # b2 == b0 for the high-pass response

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a band-pass biquad filter (constant skirt gain)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create an all-pass biquad filter (flat magnitude, phase shift only)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # For an all-pass filter the a-coefficients are the b-coefficients reversed.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a peaking EQ biquad filter with ``gain_db`` boost/cut."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a low-shelf biquad filter with ``gain_db`` boost/cut."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Cookbook intermediates: (A±1) ∓/± (A∓1)·cos(w0)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)

    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a high-shelf biquad filter with ``gain_db`` boost/cut."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)

    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
714
"""Interactive Vigenère cipher over the uppercase Latin alphabet.

Non-alphabetic characters pass through unchanged and do not advance the
key; letter case of the message is preserved.
"""
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    """Prompt for a message, key and mode, then print the translation."""
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    """Encrypt ``message`` with the Vigenère ``key``."""
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    """Decrypt ``message`` with the Vigenère ``key``."""
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    """Shift each letter of ``message`` by the matching key letter.

    ``mode`` is ``"encrypt"`` (add the key offset) or ``"decrypt"``
    (subtract it); the key index only advances on alphabetic symbols.
    """
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)

            # Re-apply the original case of the message character.
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # Non-letters are copied verbatim without consuming key material.
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
30
0
"""Release utility: point the doc-site version switcher at a new release."""
import argparse

CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version: str, js_file: str = CUSTOM_JS_FILE) -> None:
    """Rewrite ``js_file`` for a new documentation release.

    Sets ``const stableVersion`` to ``v{version}`` and appends a
    ``"v{version}": "v{version}"`` entry at the end of the
    ``versionMapping`` object literal.

    Args:
        version: Release version without the leading ``v`` (e.g. ``"4.1.0"``).
        js_file: Path of the JS file to rewrite (defaults to the docs file).
    """
    with open(js_file, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the closing brace of the mapping...
    while not lines[index].startswith("}"):
        index += 1
    # ...and append the new version to the last entry line before it.
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(js_file, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
715
"""Tests for the Bark processor (tokenizer + speaker-embedding handling)."""
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        # Small public checkpoint keeps download/testing cost low.
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        """Tokenizer for the test checkpoint, forwarding any overrides."""
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(
                voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist()
            )

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(
                voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist()
            )

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        # NOTE(review): boolean flag values below are the canonical upstream
        # ones; the garbled source did not preserve them — confirm upstream.
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
30
0
"""Tests for the MGP-STR processor (char tokenizer + ViT image processor)."""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # Character vocabulary used by the MGP-STR tokenizer.
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """A single random PIL image in HWC layout."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        # char_decode strips the spaces the tokenizer inserts between chars.
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        # Logits over the char / BPE / wordpiece vocabularies respectively.
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
716
"""Image-classification task template for `datasets`."""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    """Maps a dataset's columns onto the image-classification schema.

    Attributes:
        task: Task identifier; always serialized even at its default value.
        input_schema: Expected input features (a single ``image`` column).
        label_schema: Expected label features (a single ``labels`` column).
        image_column: Name of the dataset column holding the image.
        label_column: Name of the dataset column holding the class label.
    """

    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the
        dataset's actual ``ClassLabel`` feature.

        Raises:
            ValueError: If ``label_column`` is missing or not a ``ClassLabel``.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write through __dict__ deliberately.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Dataset column name -> canonical task column name."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
30
0
"""Convert a DALL-E dVAE encoder checkpoint into a FLAVA image codebook."""
import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s, old, new, occurrence):
    """Replace the last ``occurrence`` occurrences of ``old`` in ``s`` with ``new``."""
    parts = s.rsplit(old, occurrence)
    return new.join(parts)


def count_parameters(state_dict):
    """Sum of all parameter values, skipping the duplicated embeddings.

    encoder.embeddings are double copied in original FLAVA, so they are
    excluded to keep both state dicts comparable.
    """
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    """Rename DALL-E encoder keys to the FLAVA codebook naming scheme."""
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        # Only the trailing .w/.b are weight/bias markers; keep earlier ones.
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Load a DALL-E encoder, remap it into a FlavaImageCodebook and save it.

    Args:
        checkpoint_path: Local path or URL of the DALL-E encoder weights.
        pytorch_dump_folder_path: Output directory for the converted model.
        config_path: Optional FlavaImageCodebookConfig to use instead of defaults.
        save_checkpoint: If False, return the converted state dict instead of saving.
    """
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()

    # Sanity check: total parameter mass must survive the key remapping.
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
717
"""Download real "class" regularization images for DreamBooth via LAION kNN."""
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    """Query the LAION kNN service and download ``num_class_images`` images.

    Writes images under ``{class_data_dir}/images`` and records captions,
    URLs and local paths in sibling text files. Returns early if enough
    images are already present.
    """
    # Over-query by 50% so that broken/unreachable URLs can be skipped.
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the query size until enough candidates come back (capped at 10k).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fb, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fc:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Validate the payload decodes as an image before keeping it.
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fb.write(images["url"] + "\n")
                    fc.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip any URL that fails to fetch/decode.
                continue
    return


def parse_args():
    """CLI arguments for the retrieval script."""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
30
0
"""Benchmark arguments for the TensorFlow backend."""
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments

if is_tf_available():
    import tensorflow as tf


logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    # Legacy negative flags kept for backwards compatibility; each maps to
    # the inverse of the corresponding positive argument.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated ``no_*`` kwargs into their positive forms,
        then pop the TF-specific options before delegating to the base class.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        """Resolve the TPU cluster once; None when no TPU is reachable."""
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        """Build the tf.distribute strategy for TPU, single GPU or CPU."""
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
718
'''simple docstring''' from __future__ import annotations def lowerCamelCase ( UpperCAmelCase__ : list , UpperCAmelCase__ : int | None = None , UpperCAmelCase__ : int | None = None ) -> None: if start is None: lowercase_ : Any = 0 if end is None: lowercase_ : List[Any] = len(UpperCAmelCase__ ) - 1 if start >= end: return lowercase_ : Optional[int] = (start + end) // 2 slowsort(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) slowsort(UpperCAmelCase__ , mid + 1 , UpperCAmelCase__ ) if sequence[end] < sequence[mid]: lowercase_ , lowercase_ : Dict = sequence[mid], sequence[end] slowsort(UpperCAmelCase__ , UpperCAmelCase__ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
30
0
"""Lazy-import structure for the Bloom model package.

Only the configuration is importable unconditionally; the fast tokenizer
and the PyTorch modeling classes are registered only when their optional
backends (``tokenizers``, ``torch``) are installed, so that a plain
``import`` never fails on a missing backend.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Submodule name -> public names; consumed by _LazyModule at the bottom.
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    # Mirror the runtime structure for static type checkers.
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
719
'''simple docstring''' import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline _lowercase : Optional[Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False) parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not") parser.add_argument("--steps", default=None, type=int, help="Num inference steps") _lowercase : Dict = parser.parse_args() _lowercase : Dict = "cpu" _lowercase : str = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings" _lowercase : Any = "path-to-your-trained-model" _lowercase : str = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: _lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) _lowercase : Any = pipe.to(device) # to channels last _lowercase : Union[str, Any] = pipe.unet.to(memory_format=torch.channels_last) _lowercase : List[Any] = pipe.vae.to(memory_format=torch.channels_last) _lowercase : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: _lowercase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex _lowercase : int = torch.randn(2, 4, 64, 64) _lowercase : int = torch.rand(1) * 999 _lowercase : Union[str, Any] = torch.randn(2, 77, 768) _lowercase : Optional[Any] = (sample, timestep, encoder_hidden_status) try: _lowercase : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: _lowercase : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) _lowercase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) _lowercase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: _lowercase : int = 
ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute _lowercase : int = 666 _lowercase : Any = torch.Generator(device).manual_seed(seed) _lowercase : int = {"generator": generator} if args.steps is not None: _lowercase : Optional[int] = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): _lowercase : List[Any] = pipe(prompt, **generate_kwargs).images[0] # save image image.save("generated.png")
30
0
'''simple docstring''' import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __magic_name__ : def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : str=14 , lowercase_ : int=7 , lowercase_ : List[str]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Tuple=False , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=99 , lowercase_ : Optional[int]=32 , lowercase_ : int=4 , lowercase_ : List[Any]=4 , lowercase_ : Tuple=4 , lowercase_ : Any=37 , lowercase_ : Dict="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=512 , lowercase_ : Tuple=0.02 , ): lowercase_ : Optional[int] = parent lowercase_ : List[Any] = batch_size lowercase_ : str = seq_length lowercase_ : List[str] = is_training lowercase_ : List[str] = use_input_mask lowercase_ : int = use_token_type_ids lowercase_ : str = use_labels lowercase_ : Dict = vocab_size lowercase_ : Optional[Any] = hidden_size lowercase_ : int = rotary_dim lowercase_ : str = num_hidden_layers lowercase_ : List[Any] = num_attention_heads lowercase_ : Union[str, Any] = intermediate_size lowercase_ : Optional[int] = hidden_act lowercase_ : List[Any] = hidden_dropout_prob lowercase_ : List[str] = attention_probs_dropout_prob lowercase_ : Any = max_position_embeddings lowercase_ : Tuple = initializer_range 
lowercase_ : Any = None lowercase_ : Dict = vocab_size - 1 lowercase_ : Tuple = vocab_size - 1 lowercase_ : List[Any] = vocab_size - 1 def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase_ : List[Any] = None if self.use_input_mask: lowercase_ : int = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Dict = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowercase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() lowercase_ : Tuple = config_and_inputs lowercase_ : Tuple = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Optional[int] ): lowercase_ : str = 20 lowercase_ : Optional[Any] = model_class_name(lowercase_ ) lowercase_ : int = model.init_cache(input_ids.shape[0] , lowercase_ ) lowercase_ : Union[str, Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) lowercase_ : Optional[int] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) lowercase_ : Tuple = model( input_ids[:, :-1] , attention_mask=lowercase_ , past_key_values=lowercase_ , position_ids=lowercase_ , ) lowercase_ : Optional[int] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) lowercase_ : str = model( input_ids[:, -1:] , attention_mask=lowercase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowercase_ , ) lowercase_ : int = 
model(lowercase_ ) lowercase_ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : Optional[Any] ): lowercase_ : int = 20 lowercase_ : List[Any] = model_class_name(lowercase_ ) lowercase_ : Any = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) lowercase_ : int = model.init_cache(input_ids.shape[0] , lowercase_ ) lowercase_ : int = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) lowercase_ : int = model( input_ids[:, :-1] , attention_mask=lowercase_ , past_key_values=lowercase_ , position_ids=lowercase_ , ) lowercase_ : Optional[Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) lowercase_ : Optional[int] = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowercase_ , position_ids=lowercase_ , ) lowercase_ : Tuple = model(lowercase_ , attention_mask=lowercase_ ) lowercase_ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' ) @require_flax class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () UpperCamelCase__ = (FlaxGPTJForCausalLM,) if is_flax_available() else () def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Union[str, Any] = FlaxGPTJModelTester(self ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): for model_class_name in self.all_model_classes: lowercase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowercase_ , lowercase_ , lowercase_ 
, lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): for model_class_name in self.all_model_classes: lowercase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) @tooslow def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Tuple = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) lowercase_ : List[str] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowercase_ , truncation=lowercase_ ) lowercase_ : List[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) lowercase_ : Optional[Any] = False lowercase_ : Optional[int] = model.config.eos_token_id lowercase_ : Union[str, Any] = jax.jit(model.generate ) lowercase_ : Any = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences lowercase_ : List[Any] = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) lowercase_ : List[str] = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. 
I'm going to""", ] self.assertListEqual(lowercase_ , lowercase_ ) @is_pt_flax_cross_test def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs lowercase_ : Dict = self._prepare_for_class(lowercase_ , lowercase_ ) lowercase_ : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowercase_ : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning lowercase_ : Union[str, Any] = getattr(lowercase_ , lowercase_ ) lowercase_ : Optional[Any] = pt_inputs["""input_ids"""].shape lowercase_ : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowercase_ ): lowercase_ : List[str] = 0 lowercase_ : Optional[int] = 1 lowercase_ : str = 0 lowercase_ : List[str] = 1 lowercase_ : List[Any] = pt_model_class(lowercase_ ).eval() lowercase_ : Optional[int] = model_class(lowercase_ , dtype=jnp.floataa ) lowercase_ : List[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_ ) lowercase_ : Dict = fx_state with torch.no_grad(): lowercase_ : List[Any] = pt_model(**lowercase_ ).to_tuple() lowercase_ : Union[str, Any] = fx_model(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowercase_ , lowercase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowercase_ ) lowercase_ : Dict = model_class.from_pretrained(lowercase_ , from_pt=lowercase_ ) lowercase_ : Any = fx_model_loaded(**lowercase_ ).to_tuple() self.assertEqual( len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for 
fx_output_loaded, pt_output in zip(lowercase_ , lowercase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs lowercase_ : Tuple = self._prepare_for_class(lowercase_ , lowercase_ ) lowercase_ : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowercase_ : Any = model_class.__name__[4:] # Skip the "Flax" at the beginning lowercase_ : Optional[Any] = getattr(lowercase_ , lowercase_ ) lowercase_ : Dict = pt_model_class(lowercase_ ).eval() lowercase_ : Dict = model_class(lowercase_ , dtype=jnp.floataa ) lowercase_ : Any = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params ) lowercase_ : Optional[Any] = pt_inputs["""input_ids"""].shape lowercase_ : List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowercase_ ): lowercase_ : Tuple = 0 lowercase_ : int = 1 lowercase_ : List[Any] = 0 lowercase_ : Any = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): lowercase_ : Tuple = pt_model(**lowercase_ ).to_tuple() lowercase_ : str = fx_model(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowercase_ , lowercase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowercase_ ) lowercase_ : Dict = pt_model_class.from_pretrained(lowercase_ , from_flax=lowercase_ ) with torch.no_grad(): lowercase_ : str = pt_model_loaded(**lowercase_ ).to_tuple() self.assertEqual( len(lowercase_ ) , 
len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowercase_ , lowercase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def SCREAMING_SNAKE_CASE_ ( self : int ): for model_class_name in self.all_model_classes: lowercase_ : Any = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) lowercase_ : Dict = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowercase_ )
720
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _lowercase : Optional[Any] = { "configuration_swiftformer": [ "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwiftFormerConfig", "SwiftFormerOnnxConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = [ "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "SwiftFormerForImageClassification", "SwiftFormerModel", "SwiftFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys _lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
from datetime import datetime import matplotlib.pyplot as plt import torch def lowerCamelCase ( UpperCAmelCase__ : Any ) -> Optional[Any]: for param in module.parameters(): lowercase_ : Optional[Any] = False def lowerCamelCase ( ) -> str: lowercase_ : Optional[int] = """cuda""" if torch.cuda.is_available() else """cpu""" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowercase_ : Union[str, Any] = """mps""" if device == "mps": print( """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch""" """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues""" """ with generations.""" ) return device def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] ) -> int: lowercase_ : Union[str, Any] = plt.imshow(UpperCAmelCase__ ) fig.axes.get_xaxis().set_visible(UpperCAmelCase__ ) fig.axes.get_yaxis().set_visible(UpperCAmelCase__ ) plt.show() def lowerCamelCase ( ) -> str: lowercase_ : Dict = datetime.now() lowercase_ : Optional[Any] = current_time.strftime("""%H:%M:%S""" ) return timestamp
721
'''simple docstring''' import unittest import numpy as np def lowerCamelCase ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray | None = None , ) -> np.ndarray: lowercase_ : List[Any] = np.shape(UpperCAmelCase__ ) lowercase_ : Dict = np.shape(UpperCAmelCase__ ) lowercase_ : int = np.shape(UpperCAmelCase__ ) if shape_a[0] != shape_b[0]: lowercase_ : Optional[int] = ( """Expected the same number of rows for A and B. """ F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(UpperCAmelCase__ ) if shape_b[1] != shape_c[1]: lowercase_ : Optional[Any] = ( """Expected the same number of columns for B and C. """ F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(UpperCAmelCase__ ) lowercase_ : Any = pseudo_inv if a_inv is None: try: lowercase_ : List[str] = np.linalg.inv(UpperCAmelCase__ ) except np.linalg.LinAlgError: raise ValueError( """Input matrix A is not invertible. 
Cannot compute Schur complement.""" ) return mat_c - mat_b.T @ a_inv @ mat_b class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Dict = np.array([[2, 1], [6, 3]] ) lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ ) lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] ) lowercase_ : Optional[int] = np.linalg.det(lowercase_ ) lowercase_ : int = np.linalg.det(lowercase_ ) lowercase_ : int = np.linalg.det(lowercase_ ) self.assertAlmostEqual(lowercase_ , det_a * det_s ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(lowercase_ ): schur_complement(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(lowercase_ ): schur_complement(lowercase_ , lowercase_ , lowercase_ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
30
0
'''simple docstring''' import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration _lowercase : Optional[int] = [ # tf -> hf ("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("beta", "bias"), ("gamma", "weight"), ("pegasus", "model"), ] _lowercase : int = [ (".output.dense", ".fc2"), ("intermediate.LayerNorm", "final_layer_norm"), ("intermediate.dense", "fc1"), ] _lowercase : Tuple = ( INIT_COMMON + [ ("attention.self.LayerNorm", "self_attn_layer_norm"), ("attention.output.dense", "self_attn.out_proj"), ("attention.self", "self_attn"), ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"), ("attention.encdec_output.dense", "encoder_attn.out_proj"), ("attention.encdec", "encoder_attn"), ("key", "k_proj"), ("value", "v_proj"), ("query", "q_proj"), ("decoder.LayerNorm", "decoder.layernorm_embedding"), ] + END_COMMON ) _lowercase : str = ( INIT_COMMON + [ ("embeddings.word_embeddings", "shared.weight"), ("embeddings.position_embeddings", "embed_positions.weight"), ("attention.self.LayerNorm", "self_attn_layer_norm"), ("attention.output.dense", "self_attn.output"), ("attention.self", "self_attn.self"), ("encoder.LayerNorm", "encoder.layernorm_embedding"), ] + END_COMMON ) _lowercase : int = [ "encdec/key/bias", "encdec/query/bias", "encdec/value/bias", "self/key/bias", "self/query/bias", "self/value/bias", "encdec_output/dense/bias", "attention/output/dense/bias", ] def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] ) -> List[Any]: for tf_name, hf_name in patterns: lowercase_ : Optional[Any] = k.replace(UpperCAmelCase__ , UpperCAmelCase__ ) return k def lowerCamelCase ( UpperCAmelCase__ : dict , UpperCAmelCase__ : dict ) -> BigBirdPegasusForConditionalGeneration: lowercase_ : Union[str, Any] = BigBirdPegasusConfig(**UpperCAmelCase__ ) lowercase_ : List[str] = 
BigBirdPegasusForConditionalGeneration(UpperCAmelCase__ ) lowercase_ : List[Any] = torch_model.state_dict() lowercase_ : Dict = {} # separating decoder weights lowercase_ : Optional[int] = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )} lowercase_ : List[str] = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )} for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ): lowercase_ : Dict = [k.endswith(UpperCAmelCase__ ) for ending in KEYS_TO_IGNORE] if any(UpperCAmelCase__ ): continue lowercase_ : Any = DECODER_PATTERNS lowercase_ : Tuple = rename_state_dict_key(UpperCAmelCase__ , UpperCAmelCase__ ) if new_k not in state_dict: raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): lowercase_ : Any = v.T lowercase_ : Optional[Any] = torch.from_numpy(UpperCAmelCase__ ) assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ): lowercase_ : Any = [k.endswith(UpperCAmelCase__ ) for ending in KEYS_TO_IGNORE] if any(UpperCAmelCase__ ): continue lowercase_ : str = REMAINING_PATTERNS lowercase_ : Optional[int] = rename_state_dict_key(UpperCAmelCase__ , UpperCAmelCase__ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F'''could not find new key {new_k} in state dict. 
(converted from {k})''' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): lowercase_ : Any = v.T lowercase_ : List[str] = torch.from_numpy(UpperCAmelCase__ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' lowercase_ : int = mapping["""model.embed_positions.weight"""] lowercase_ : List[str] = mapping.pop("""model.embed_positions.weight""" ) lowercase_ : List[Any] = torch_model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ ) lowercase_ : Optional[Any] = [ k for k in missing if k not in [ """final_logits_bias""", """model.encoder.embed_tokens.weight""", """model.decoder.embed_tokens.weight""", """lm_head.weight""", ] ] assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], F'''no matches found for the following tf keys {extra}''' return torch_model def lowerCamelCase ( UpperCAmelCase__ : Any ) -> Dict: lowercase_ : List[Any] = tf.train.list_variables(UpperCAmelCase__ ) lowercase_ : Optional[int] = {} lowercase_ : int = ["""global_step"""] for name, shape in tqdm(UpperCAmelCase__ , desc="""converting tf checkpoint to dict""" ): lowercase_ : Optional[int] = any(pat in name for pat in ignore_name ) if skip_key: continue lowercase_ : List[str] = tf.train.load_variable(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : Union[str, Any] = array return tf_weights def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : dict ) -> Optional[int]: lowercase_ : List[Any] = get_tf_weights_as_numpy(UpperCAmelCase__ ) lowercase_ : Any = convert_bigbird_pegasus(UpperCAmelCase__ , UpperCAmelCase__ ) torch_model.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": _lowercase : int = argparse.ArgumentParser() parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables") 
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.") _lowercase : Optional[Any] = parser.parse_args() _lowercase : Any = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
700
'''simple docstring''' _lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" def lowerCamelCase ( UpperCAmelCase__ : bytes ) -> bytes: # Make sure the supplied data is a bytes-like object if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ : Union[str, Any] = F'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(UpperCAmelCase__ ) lowercase_ : Dict = """""".join(bin(UpperCAmelCase__ )[2:].zfill(8 ) for byte in data ) lowercase_ : Union[str, Any] = len(UpperCAmelCase__ ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase_ : List[Any] = b"""=""" * ((6 - len(UpperCAmelCase__ ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(UpperCAmelCase__ ) % 6) else: lowercase_ : Union[str, Any] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(UpperCAmelCase__ ) , 6 ) ).encode() + padding ) def lowerCamelCase ( UpperCAmelCase__ : str ) -> bytes: # Make sure encoded_data is either a string or a bytes-like object if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ : List[str] = ( """argument should be a bytes-like object or ASCII string, """ F'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(UpperCAmelCase__ ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): try: lowercase_ : Optional[int] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) lowercase_ : Any = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 
characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(UpperCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase_ : Optional[int] = encoded_data[:-padding] lowercase_ : Any = """""".join( bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase_ : int = """""".join( bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data ) lowercase_ : Optional[int] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(UpperCAmelCase__ ) , 8 ) ] return bytes(UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
30
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = ShapEImgaImgPipeline UpperCamelCase__ = ['''image'''] UpperCamelCase__ = ['''image'''] UpperCamelCase__ = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] UpperCamelCase__ = False @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): return 32 @property def SCREAMING_SNAKE_CASE_ ( self : int ): return 32 @property def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): return self.time_input_dim * 4 @property def SCREAMING_SNAKE_CASE_ ( self : Any ): return 8 @property def SCREAMING_SNAKE_CASE_ ( self : Any ): torch.manual_seed(0 ) lowercase_ : Tuple = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) lowercase_ : Tuple = CLIPVisionModel(lowercase_ ) return model @property def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Optional[int] = CLIPImageProcessor( crop_size=224 , do_center_crop=lowercase_ , do_normalize=lowercase_ , do_resize=lowercase_ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , ) return image_processor @property def SCREAMING_SNAKE_CASE_ ( 
self : Optional[Any] ): torch.manual_seed(0 ) lowercase_ : Optional[Any] = { """num_attention_heads""": 2, """attention_head_dim""": 16, """embedding_dim""": self.time_input_dim, """num_embeddings""": 32, """embedding_proj_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """num_layers""": 1, """clip_embed_dim""": self.time_input_dim * 2, """additional_embeddings""": 0, """time_embed_act_fn""": """gelu""", """norm_in_type""": """layer""", """embedding_proj_norm_type""": """layer""", """encoder_hid_proj_type""": None, """added_emb_type""": None, } lowercase_ : Union[str, Any] = PriorTransformer(**lowercase_ ) return model @property def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): torch.manual_seed(0 ) lowercase_ : str = { """param_shapes""": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), """d_latent""": self.time_input_dim, """d_hidden""": self.renderer_dim, """n_output""": 12, """background""": ( 0.1, 0.1, 0.1, ), } lowercase_ : int = ShapERenderer(**lowercase_ ) return model def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : List[str] = self.dummy_prior lowercase_ : Union[str, Any] = self.dummy_image_encoder lowercase_ : Union[str, Any] = self.dummy_image_processor lowercase_ : List[Any] = self.dummy_renderer lowercase_ : List[str] = HeunDiscreteScheduler( beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=lowercase_ , clip_sample=lowercase_ , clip_sample_range=1.0 , ) lowercase_ : int = { """prior""": prior, """image_encoder""": image_encoder, """image_processor""": image_processor, """renderer""": renderer, """scheduler""": scheduler, } return components def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : List[str]=0 ): lowercase_ : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) if str(lowercase_ ).startswith("""mps""" ): lowercase_ : int = 
torch.manual_seed(lowercase_ ) else: lowercase_ : Dict = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowercase_ : int = { """image""": input_image, """generator""": generator, """num_inference_steps""": 1, """frame_size""": 32, """output_type""": """np""", } return inputs def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Optional[Any] = """cpu""" lowercase_ : Union[str, Any] = self.get_dummy_components() lowercase_ : Optional[Any] = self.pipeline_class(**lowercase_ ) lowercase_ : Tuple = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : int = pipe(**self.get_dummy_inputs(lowercase_ ) ) lowercase_ : Union[str, Any] = output.images[0] lowercase_ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowercase_ : Any = np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE_ ( self : str ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Union[str, Any] = torch_device == """cpu""" lowercase_ : int = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=lowercase_ , relax_max_difference=lowercase_ , ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Optional[int] = self.get_dummy_components() lowercase_ : Any = self.pipeline_class(**lowercase_ ) lowercase_ : Optional[int] = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : List[str] = 1 lowercase_ : Optional[Any] = 2 lowercase_ : Optional[Any] = self.get_dummy_inputs(lowercase_ ) for key in inputs.keys(): if key in self.batch_params: lowercase_ : Tuple = batch_size * [inputs[key]] lowercase_ : List[Any] = pipe(**lowercase_ , 
num_images_per_prompt=lowercase_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : str ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" ) lowercase_ : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/test_shap_e_img2img_out.npy""" ) lowercase_ : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" ) lowercase_ : List[Any] = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Optional[Any] = torch.Generator(device=lowercase_ ).manual_seed(0 ) lowercase_ : List[Any] = pipe( lowercase_ , generator=lowercase_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(lowercase_ , lowercase_ )
701
"""Update the version switcher in the docs' custom.js for a new release."""
import argparse

# Default location of the version-switcher script inside the docs build.
JS_PATH = "docs/source/_static/js/custom.js"


def update_custom_js(version, js_path=JS_PATH):
    """Rewrite ``js_path`` so that ``version`` becomes the stable version.

    Updates the ``const stableVersion = ...`` line and appends the new
    version to the ``versionMapping`` dictionary just before its closing
    brace.

    Args:
        version: release version string, e.g. ``"4.28.0"`` (without ``v``).
        js_path: path of the custom.js file to edit (defaults to the docs
            location; parameterized so the function is testable).
    """
    with open(js_path, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the closing brace of the mapping
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version on the line before the closing brace
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(js_path, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
30
0
"""Tests for ``PoolFormerImageProcessor``."""
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class PoolFormerImageProcessingTester(unittest.TestCase):
    """Builds keyword arguments and dummy configuration for the processor tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        # Keyword arguments used to instantiate the image processor under test.
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Exercises PoolFormerImageProcessor on PIL, numpy and torch inputs."""

    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
702
"""Tests for the PyTorch BiT model (model, classification head, backbone)."""
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image


class BitModelTester:
    """Builds small BiT configurations/inputs and checks model outputs."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Final feature map is downsampled by a factor of 32.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not
    use input_ids/attention_mask and has no attentions.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                # BiT normalization layers must initialize to identity (weight 1, bias 0).
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats test image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
30
0
"""Tests for ``datasets.utils.info_utils.is_small_dataset``."""
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    # pytest binds parametrized values and fixtures by argument name, so the
    # parameters must be named exactly ``dataset_size``, ``input_in_memory_max_size``
    # and ``monkeypatch``.
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size

    # A dataset is "small" only when both sizes are set and it fits in memory.
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
703
"""Tests for the ROUGE helpers in ``utils.calculate_rouge`` and ``rouge_cli``."""
from collections import defaultdict
from pathlib import Path

import pandas as pd

from rouge_cli import calculate_rouge_path
from utils import calculate_rouge


# Model predictions (hypotheses) ...
PRED = [
    "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
    " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
    " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
    "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
    " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
    " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
    " body.",
    "Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
    " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
    " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
    " punishment.",
]

# ... and the matching reference summaries (must not share a single name with
# PRED: assigning both lists to the same variable silently drops the first).
TGT = [
    "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
    " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
    " had informed his Lufthansa training school of an episode of severe depression, airline says .",
    "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
    " Israel and the United States opposed the move, which could open the door to war crimes investigations against"
    " Israelis .",
    "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
    " death . Organization claims that governments around the world are using the threat of terrorism to advance"
    " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
    " sentences up by 28% .",
]


def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    # rougeLsum is sentence-aware: separating sentences by newlines must help.
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_ls():
    # Plain rouge1/2/L ignore sentence boundaries, so newline_sep is a no-op.
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    tgt = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]

    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
30
0
"""Crawl a web page and harvest e-mail addresses from the pages it links to."""
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    """HTML parser that collects the href of every anchor tag it sees."""

    def __init__(self, domain: str):
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    # Must be named ``handle_starttag`` so HTMLParser.feed() invokes it.
    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor a bare fragment.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    """Return the registered domain of *url*, e.g. ``github.com``."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full network location (sub-domain included) of *url*."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Fetch *url*, follow the links it contains and collect e-mail addresses.

    Returns a sorted list of unique addresses matching ``<alnum>@<domain>``.
    Exits with status 1 if the initial request fails with a ValueError.
    """
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(url)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
704
"""Speech2Text model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    """Configuration for Speech2Text encoder-decoder models.

    Stores the architecture hyper-parameters (layer counts, dimensions,
    dropouts, convolutional subsampler shape) and forwards the special-token
    ids to :class:`PretrainedConfig`.
    """

    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
30
0
"""Validate the check letter of Spanish national identity numbers (DNI)."""

NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
# Check letters indexed by (number % 23), per the official DNI algorithm.
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Return True when ``spanish_id`` is a valid Spanish DNI.

    Accepts an optional dash before the letter and is case-insensitive.

    Raises:
        TypeError: if the input is not a string.
        ValueError: if the input is not 8 digits followed by one letter.

    >>> is_spain_national_id("12345678Z")
    True
    >>> is_spain_national_id("12345678z")
    True
    >>> is_spain_national_id("12345678-Z")
    True
    >>> is_spain_national_id("12345678T")
    False
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    # A trailing digit means there is no check letter at all.
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
705
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __magic_name__ : def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=13 , lowercase_ : List[str]=10 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=2 , lowercase_ : Optional[Any]=2 , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : str=4 , lowercase_ : Dict=37 , lowercase_ : Tuple="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=10 , lowercase_ : Tuple=0.02 , lowercase_ : Any="divided_space_time" , lowercase_ : Tuple=None , ): lowercase_ : int = parent lowercase_ : str = batch_size lowercase_ : List[str] = image_size lowercase_ : str = num_channels lowercase_ : List[Any] = patch_size lowercase_ : Optional[Any] = num_frames lowercase_ : Dict = is_training lowercase_ : int = use_labels lowercase_ : List[str] = hidden_size lowercase_ : Dict = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Any = intermediate_size lowercase_ : Optional[int] = hidden_act lowercase_ 
: Optional[Any] = hidden_dropout_prob lowercase_ : List[Any] = attention_probs_dropout_prob lowercase_ : Any = attention_type lowercase_ : Union[str, Any] = initializer_range lowercase_ : List[str] = scope lowercase_ : Optional[int] = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token lowercase_ : Dict = (image_size // patch_size) ** 2 lowercase_ : List[Any] = (num_frames) * self.num_patches_per_frame + 1 def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Optional[Any] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) lowercase_ : int = None if self.use_labels: lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels ) lowercase_ : Optional[Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : int = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) lowercase_ : Any = self.num_labels return config def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[str] ): lowercase_ : Optional[Any] = TimesformerModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : int = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str ): 
lowercase_ : Dict = TimesformerForVideoClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : int = model(lowercase_ ) # verify the logits shape lowercase_ : List[Any] = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : List[str] = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ : int = config_and_inputs lowercase_ : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () UpperCamelCase__ = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = TimesformerModelTester(self ) lowercase_ : Union[str, Any] = ConfigTester( self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Tuple=False ): lowercase_ : List[Any] = copy.deepcopy(lowercase_ ) if return_labels: if model_class in get_values(lowercase_ ): lowercase_ : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_ ) return inputs_dict def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): pass def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: 
lowercase_ : str = model_class(lowercase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase_ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Dict = model_class(lowercase_ ) lowercase_ : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : Union[str, Any] = [*signature.parameters.keys()] lowercase_ : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Any ): for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : Any = TimesformerModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): if not self.has_attentions: pass else: lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : List[str] = True for model_class in self.all_model_classes: lowercase_ : str = self.model_tester.seq_length lowercase_ : int = self.model_tester.num_frames lowercase_ : int = True lowercase_ : Any = False lowercase_ : str = True lowercase_ : int = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : List[str] = outputs.attentions self.assertEqual(len(lowercase_ ) , 
self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase_ : List[str] = True lowercase_ : str = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : int = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) lowercase_ : Optional[Any] = len(lowercase_ ) # Check attention is always last and order is fine lowercase_ : Tuple = True lowercase_ : Dict = True lowercase_ : str = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) self.assertEqual(out_len + 1 , len(lowercase_ ) ) lowercase_ : Optional[Any] = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict ): lowercase_ : List[str] = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : Dict = outputs.hidden_states lowercase_ : List[Any] = self.model_tester.num_hidden_layers + 1 
self.assertEqual(len(lowercase_ ) , lowercase_ ) lowercase_ : List[Any] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : List[str] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : Optional[int] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) def lowerCamelCase ( ) -> Optional[int]: lowercase_ : List[str] = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) lowercase_ : List[Any] = np.load(UpperCAmelCase__ ) return list(UpperCAmelCase__ ) @require_torch @require_vision class __magic_name__ ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE_ ( self : str ): # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to( lowercase_ ) lowercase_ : Optional[Any] = self.default_image_processor lowercase_ : Any = prepare_video() lowercase_ : Optional[int] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase_ ) # forward pass with torch.no_grad(): lowercase_ : Optional[Any] = model(**lowercase_ ) # verify the logits lowercase_ : Any = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , lowercase_ ) lowercase_ : int = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
30
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase : Optional[Any] = logging.get_logger(__name__) _lowercase : Union[str, Any] = { "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''speech_to_text''' UpperCamelCase__ = ['''past_key_values'''] UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : str , lowercase_ : Optional[int]=10000 , lowercase_ : int=12 , lowercase_ : Any=2048 , lowercase_ : Any=4 , lowercase_ : Dict=6 , lowercase_ : Any=2048 , lowercase_ : List[str]=4 , lowercase_ : str=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=True , lowercase_ : int="relu" , lowercase_ : str=256 , lowercase_ : int=0.1 , lowercase_ : int=0.0 , lowercase_ : str=0.0 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=1 , lowercase_ : Dict=0 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=6000 , lowercase_ : Tuple=1024 , lowercase_ : str=2 , lowercase_ : Any=(5, 5) , lowercase_ : Union[str, Any]=1024 , lowercase_ : Dict=80 , lowercase_ : List[Any]=1 , **lowercase_ : int , ): lowercase_ : List[Any] = vocab_size lowercase_ : str = d_model lowercase_ : List[Any] = encoder_ffn_dim lowercase_ : str = encoder_layers lowercase_ : Dict = encoder_attention_heads lowercase_ : str = decoder_ffn_dim lowercase_ : int = decoder_layers lowercase_ : Any = decoder_attention_heads lowercase_ : Any = dropout lowercase_ : Dict = attention_dropout lowercase_ : Optional[int] = activation_dropout lowercase_ : Any = activation_function lowercase_ : Union[str, Any] = init_std lowercase_ : str = encoder_layerdrop lowercase_ : 
Optional[int] = decoder_layerdrop lowercase_ : Dict = use_cache lowercase_ : Union[str, Any] = encoder_layers lowercase_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True lowercase_ : Dict = max_source_positions lowercase_ : Optional[int] = max_target_positions lowercase_ : Tuple = num_conv_layers lowercase_ : Tuple = list(lowercase_ ) lowercase_ : Union[str, Any] = conv_channels lowercase_ : str = input_feat_per_channel lowercase_ : str = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( """Configuration for convolutional module is incorrect. """ """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """ f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ''' f'''`config.num_conv_layers = {self.num_conv_layers}`.''' ) super().__init__( pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
706
'''simple docstring''' from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig _lowercase : Tuple = logging.get_logger(__name__) # General docstring _lowercase : List[str] = "RegNetConfig" # Base docstring _lowercase : Dict = "facebook/regnet-y-040" _lowercase : Union[str, Any] = [1, 1088, 7, 7] # Image classification docstring _lowercase : Optional[Any] = "facebook/regnet-y-040" _lowercase : Union[str, Any] = "tabby, tabby cat" _lowercase : str = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class __magic_name__ ( nn.Module): def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : int = 1 , lowercase_ : Optional[str] = "relu" , ): super().__init__() lowercase_ : List[Any] = nn.Convad( lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , ) lowercase_ : str = nn.BatchNormad(lowercase_ ) lowercase_ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity() def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] ): lowercase_ : Dict = self.convolution(lowercase_ ) lowercase_ : str = self.normalization(lowercase_ ) lowercase_ : Optional[Any] = self.activation(lowercase_ ) return hidden_state class __magic_name__ ( nn.Module): def __init__( self : List[Any] , lowercase_ : RegNetConfig ): 
super().__init__() lowercase_ : str = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) lowercase_ : Any = config.num_channels def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] ): lowercase_ : List[str] = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" ) lowercase_ : Any = self.embedder(lowercase_ ) return hidden_state class __magic_name__ ( nn.Module): def __init__( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ): super().__init__() lowercase_ : Optional[Any] = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ ) lowercase_ : Union[str, Any] = nn.BatchNormad(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Tensor ): lowercase_ : Tuple = self.convolution(lowercase_ ) lowercase_ : str = self.normalization(lowercase_ ) return hidden_state class __magic_name__ ( nn.Module): def __init__( self : str , lowercase_ : int , lowercase_ : int ): super().__init__() lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) ) lowercase_ : int = nn.Sequential( nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any ): # b c h w -> b c 1 1 lowercase_ : List[str] = self.pooler(lowercase_ ) lowercase_ : Optional[int] = self.attention(lowercase_ ) lowercase_ : Any = hidden_state * attention return hidden_state class __magic_name__ ( nn.Module): def __init__( self : Optional[int] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ): super().__init__() lowercase_ : List[Any] = in_channels != out_channels or stride != 1 lowercase_ : Optional[int] = max(1 , out_channels // config.groups_width ) 
lowercase_ : Dict = ( RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity() ) lowercase_ : List[Any] = nn.Sequential( RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , ) lowercase_ : int = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any ): lowercase_ : Any = hidden_state lowercase_ : Union[str, Any] = self.layer(lowercase_ ) lowercase_ : Union[str, Any] = self.shortcut(lowercase_ ) hidden_state += residual lowercase_ : str = self.activation(lowercase_ ) return hidden_state class __magic_name__ ( nn.Module): def __init__( self : Optional[Any] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ): super().__init__() lowercase_ : str = in_channels != out_channels or stride != 1 lowercase_ : int = max(1 , out_channels // config.groups_width ) lowercase_ : int = ( RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity() ) lowercase_ : Union[str, Any] = nn.Sequential( RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , ) lowercase_ : Optional[int] = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ): lowercase_ : Optional[int] = hidden_state lowercase_ : str = self.layer(lowercase_ ) lowercase_ : int = self.shortcut(lowercase_ ) hidden_state += residual lowercase_ : Optional[Any] = 
self.activation(lowercase_ ) return hidden_state class __magic_name__ ( nn.Module): def __init__( self : str , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ): super().__init__() lowercase_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer lowercase_ : str = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ): lowercase_ : Tuple = self.layers(lowercase_ ) return hidden_state class __magic_name__ ( nn.Module): def __init__( self : Dict , lowercase_ : RegNetConfig ): super().__init__() lowercase_ : Optional[Any] = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) lowercase_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ): self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ): lowercase_ : Tuple = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowercase_ : Union[str, Any] = hidden_states + (hidden_state,) lowercase_ : Dict = stage_module(lowercase_ ) if output_hidden_states: lowercase_ : Optional[Any] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return 
BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ ) class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = RegNetConfig UpperCamelCase__ = '''regnet''' UpperCamelCase__ = '''pixel_values''' UpperCamelCase__ = True def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] ): if isinstance(lowercase_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" ) elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any=False ): if isinstance(lowercase_ , lowercase_ ): lowercase_ : List[str] = value _lowercase : Dict = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" _lowercase : Any = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( '''The bare RegNet model outputting raw features without any specific head on top.''', _UpperCAmelCase, ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class __magic_name__ ( _UpperCAmelCase): def __init__( self : Any , lowercase_ : Any ): super().__init__(lowercase_ ) lowercase_ : List[str] = config lowercase_ : Union[str, Any] = RegNetEmbeddings(lowercase_ ) lowercase_ : Union[str, Any] = RegNetEncoder(lowercase_ ) lowercase_ : str = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ): lowercase_ : List[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ : str = self.embedder(lowercase_ ) lowercase_ : Optional[Any] = self.encoder( lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ ) lowercase_ : List[Any] = encoder_outputs[0] lowercase_ : str = self.pooler(lowercase_ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' RegNet Model with an image classification head on top (a linear layer on top of the pooled 
features), e.g. for ImageNet. ''', _UpperCAmelCase, ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class __magic_name__ ( _UpperCAmelCase): def __init__( self : Dict , lowercase_ : str ): super().__init__(lowercase_ ) lowercase_ : Any = config.num_labels lowercase_ : List[str] = RegNetModel(lowercase_ ) # classification head lowercase_ : Any = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ): lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ : Optional[int] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ ) lowercase_ : Optional[int] = outputs.pooler_output if return_dict else outputs[1] lowercase_ : List[Any] = self.classifier(lowercase_ ) lowercase_ : Optional[int] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowercase_ : Optional[int] = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowercase_ : str = """single_label_classification""" else: lowercase_ : str = """multi_label_classification""" if self.config.problem_type == "regression": lowercase_ : str = MSELoss() if self.num_labels == 1: lowercase_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowercase_ : List[str] = 
loss_fct(lowercase_ , lowercase_ ) elif self.config.problem_type == "single_label_classification": lowercase_ : Optional[int] = CrossEntropyLoss() lowercase_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowercase_ : Dict = BCEWithLogitsLoss() lowercase_ : Tuple = loss_fct(lowercase_ , lowercase_ ) if not return_dict: lowercase_ : Tuple = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
30
0
# NOTE(review): this chunk is machine-obfuscated — every local was renamed to
# `lowercase_`, so consecutive assignments shadow one another and later reads
# (`tokenizer`, `tokens`, `out_s`, `encoded_sequence`, ...) reference names no
# assignment defines anymore. Code left byte-identical; comments describe the
# apparent intent of the original GPT-2 tokenizer test suite.
import json
import os
import unittest

from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
    # NOTE(review): the five class attributes below were all renamed to
    # `UpperCamelCase__`, so only the final `False` survives at runtime;
    # originally these configured tokenizer_class / rust_tokenizer_class /
    # test_rust_tokenizer / special kwargs — TODO confirm against upstream.
    UpperCamelCase__ = GPTaTokenizer
    UpperCamelCase__ = GPTaTokenizerFast
    UpperCamelCase__ = True
    UpperCamelCase__ = {'''add_prefix_space''': True}
    UpperCamelCase__ = False

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # setUp: writes a tiny BPE vocab + merges file pair into tmpdirname.
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowercase_ : Optional[Any] = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
            """<|endoftext|>""",
        ]
        # token -> id mapping for the vocab list just built
        lowercase_ : Dict = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
        # BPE merge rules, most frequent first
        lowercase_ : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        lowercase_ : Tuple = {"""unk_token""": """<unk>"""}

        # NOTE(review): destinations were originally self.vocab_file /
        # self.merges_file; the attribute assignment was lost in obfuscation.
        lowercase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        lowercase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(lowercase_ ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(lowercase_ ) )

    def SCREAMING_SNAKE_CASE_ ( self : Dict , **lowercase_ : int ):
        # Factory for the slow (pure-Python) tokenizer under test.
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : List[Any] ):
        # Factory for the fast (Rust-backed) tokenizer under test.
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Any ):
        # get_input_output_texts: same text in and out for this vocab.
        lowercase_ : Dict = """lower newer"""
        lowercase_ : int = """lower newer"""
        return input_text, output_text

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        # Full-tokenizer round trip: text -> BPE tokens -> ids.
        lowercase_ : List[str] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        lowercase_ : Union[str, Any] = """lower newer"""
        lowercase_ : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        lowercase_ : Optional[int] = tokenizer.tokenize(lowercase_ , add_prefix_space=lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )

        lowercase_ : Optional[Any] = tokens + [tokenizer.unk_token]
        lowercase_ : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        # Slow vs. fast tokenizer parity on tokens, ids, and the unk token.
        if not self.test_rust_tokenizer:
            return

        lowercase_ : Tuple = self.get_tokenizer()
        lowercase_ : List[str] = self.get_rust_tokenizer(add_prefix_space=lowercase_ )
        lowercase_ : List[str] = """lower newer"""

        # Testing tokenization
        lowercase_ : Dict = tokenizer.tokenize(lowercase_ , add_prefix_space=lowercase_ )
        lowercase_ : int = rust_tokenizer.tokenize(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )

        # Testing conversion to ids without special tokens
        lowercase_ : Tuple = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
        lowercase_ : Optional[Any] = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )

        # Testing conversion to ids with special tokens
        lowercase_ : Dict = self.get_rust_tokenizer(add_prefix_space=lowercase_ )
        lowercase_ : List[str] = tokenizer.encode(lowercase_ , add_prefix_space=lowercase_ )
        lowercase_ : Dict = rust_tokenizer.encode(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )

        # Testing the unknown token
        lowercase_ : str = tokens + [rust_tokenizer.unk_token]
        lowercase_ : Any = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , *lowercase_ : str , **lowercase_ : Optional[int] ):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[Any]=15 ):
        # Padding without a pad token must raise, for both single and pair
        # inputs, across encode / encode_plus / batch_encode_plus.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowercase_ : List[str] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )

                # Simple input
                lowercase_ : Dict = """This is a simple input"""
                lowercase_ : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
                lowercase_ : Tuple = ("""This is a simple input""", """This is a pair""")
                lowercase_ : Any = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]

                # Simple input tests
                self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="""max_length""" )

                # Simple input
                self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="""max_length""" )

                # Simple input
                self.assertRaises(
                    lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="""max_length""" , )

                # Pair input
                self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="""max_length""" )

                # Pair input
                self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="""max_length""" )

                # Pair input
                self.assertRaises(
                    lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="""max_length""" , )

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # Padding behavior when the slow tokenizer is given an explicit pad token.
        lowercase_ : Optional[int] = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )

        # Simple input
        lowercase_ : str = """This is a simple input"""
        lowercase_ : str = ["""This is a simple input looooooooong""", """This is a simple input"""]
        lowercase_ : Optional[int] = ("""This is a simple input""", """This is a pair""")
        lowercase_ : Dict = [
            ("""This is a simple input loooooong""", """This is a simple input"""),
            ("""This is a simple pair loooooong""", """This is a simple pair"""),
        ]

        lowercase_ : int = tokenizer.pad_token_id

        lowercase_ : Any = tokenizer(lowercase_ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
        lowercase_ : int = tokenizer(lowercase_ , padding=lowercase_ , truncate=lowercase_ , return_tensors="""np""" )
        lowercase_ : str = tokenizer(*lowercase_ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
        lowercase_ : Optional[Any] = tokenizer(lowercase_ , padding=lowercase_ , truncate=lowercase_ , return_tensors="""np""" )

        # s
        # test single string max_length padding
        self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s["""input_ids"""] )
        self.assertTrue(0 in out_s["""attention_mask"""] )

        # s2
        # test automatic padding
        self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
        self.assertFalse(0 in out_sa["""attention_mask"""][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
        self.assertTrue(0 in out_sa["""attention_mask"""][1] )

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p["""input_ids"""] )
        self.assertTrue(0 in out_p["""attention_mask"""] )

        # p2
        # test automatic padding pair
        self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
        self.assertFalse(0 in out_pa["""attention_mask"""][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
        self.assertTrue(0 in out_pa["""attention_mask"""][1] )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # A custom bos token ("$$$") must be prepended on encode and survive decode.
        lowercase_ : Tuple = """$$$"""
        lowercase_ : Tuple = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowercase_ , add_bos_token=lowercase_ )

        lowercase_ : List[Any] = """This is a simple input"""
        lowercase_ : str = ["""This is a simple input 1""", """This is a simple input 2"""]

        lowercase_ : int = tokenizer.bos_token_id

        lowercase_ : Dict = tokenizer(lowercase_ )
        lowercase_ : Tuple = tokenizer(lowercase_ )

        self.assertEqual(out_s.input_ids[0] , lowercase_ )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )

        lowercase_ : List[Any] = tokenizer.decode(out_s.input_ids )
        lowercase_ : Dict = tokenizer.batch_decode(out_sa.input_ids )

        self.assertEqual(decode_s.split()[0] , lowercase_ )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        pass

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # special_tokens_mask length must match input_ids; masked positions are
        # filtered out and compared against the raw (no-special-tokens) encoding.
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        lowercase_ : Tuple = [self.get_tokenizer(do_lower_case=lowercase_ , add_bos_token=lowercase_ )]

        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                lowercase_ : int = """Encode this."""
                lowercase_ : Tuple = """This one too please."""
                lowercase_ : List[str] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
                encoded_sequence += tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
                lowercase_ : List[Any] = tokenizer.encode_plus(
                    lowercase_ , lowercase_ , add_special_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , )
                lowercase_ : int = encoded_sequence_dict["""input_ids"""]
                lowercase_ : List[str] = encoded_sequence_dict["""special_tokens_mask"""]
                self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )

                lowercase_ : int = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(lowercase_ )
                ]
                lowercase_ : List[str] = [x for x in filtered_sequence if x is not None]
                self.assertEqual(lowercase_ , lowercase_ )


@require_tokenizers
class __magic_name__ ( unittest.TestCase):
    # OPT tokenizer regression tests (fixed token ids for "A photo of a cat").
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        lowercase_ : str = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=lowercase_ )
        lowercase_ : Optional[int] = """A photo of a cat"""

        lowercase_ : Any = tokenizer.encode(
            lowercase_ , )
        self.assertEqual(lowercase_ , [2, 250, 1345, 9, 10, 4758] )
        # Encoding must survive a save/reload round trip.
        tokenizer.save_pretrained("""test_opt""" )

        lowercase_ : Optional[Any] = AutoTokenizer.from_pretrained("""./test_opt""" )
        lowercase_ : List[Any] = tokenizer.encode(
            lowercase_ , )
        self.assertEqual(lowercase_ , [2, 250, 1345, 9, 10, 4758] )

    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        lowercase_ : int = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , use_slow=lowercase_ )
        lowercase_ : List[Any] = """A photo of a cat"""

        lowercase_ : List[Any] = tokenizer.encode(
            lowercase_ , )
        # Same as above
        self.assertEqual(lowercase_ , [2, 250, 1345, 9, 10, 4758] )

    @unittest.skip("""This test is failing because of a bug in the fast tokenizer""" )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # A custom bos token should replace id 2 at position 0 after encode,
        # and persist across save_pretrained/from_pretrained.
        lowercase_ : Any = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=lowercase_ )
        lowercase_ : Union[str, Any] = """bos"""
        lowercase_ : int = tokenizer.get_vocab()["""bos"""]

        lowercase_ : Union[str, Any] = """A photo of a cat"""

        lowercase_ : Dict = tokenizer.encode(
            lowercase_ , )
        # We changed the bos token
        self.assertEqual(lowercase_ , [31957, 250, 1345, 9, 10, 4758] )
        tokenizer.save_pretrained("""./tok""" )
        lowercase_ : Optional[int] = AutoTokenizer.from_pretrained("""./tok""" )
        self.assertTrue(tokenizer.is_fast )
        lowercase_ : Tuple = tokenizer.encode(
            lowercase_ , )
        self.assertEqual(lowercase_ , [31957, 250, 1345, 9, 10, 4758] )
707
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : int = [ "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FocalNetForImageClassification", "FocalNetForMaskedImageModeling", "FocalNetBackbone", "FocalNetModel", "FocalNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
'''simple docstring'''

# NOTE(review): machine-obfuscated copy of the `datasets` Spark builder.
# All locals/attributes were renamed to `lowercase_`, so assignments that
# originally stored onto `self.<attr>` (e.g. `self.df = df`) are now dead
# locals while later reads still use the original attribute names — flagged
# inline, code left byte-identical. Also `shutil.move` is used below without
# a visible `import shutil` in this chunk.
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int

_lowercase : str = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class __magic_name__ ( datasets.BuilderConfig):
    # BuilderConfig for Spark-backed datasets (single optional field).
    UpperCamelCase__ = None


def lowerCamelCase ( UpperCAmelCase__ : "pyspark.sql.DataFrame" , UpperCAmelCase__ : List[int] , ) -> str:
    # Returns a generator factory that yields (example_id, row_dict) pairs,
    # walking partitions in the given order.
    import pyspark

    def generate_fn():
        # Tag every row with its Spark partition id so rows can be pulled
        # partition by partition.
        lowercase_ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
        for partition_id in partition_order:
            lowercase_ : int = df_with_partition_id.select("""*""" ).where(f'''part_id = {partition_id}''' ).drop("""part_id""" )
            lowercase_ : Any = partition_df.collect()
            lowercase_ : Dict = 0
            for row in rows:
                # Example key is "<partition>_<row index within partition>".
                yield f'''{partition_id}_{row_id}''', row.asDict()
                row_id += 1

    return generate_fn


class __magic_name__ ( _BaseExamplesIterable):
    # Examples iterable over a Spark DataFrame; supports shuffling and
    # worker sharding by permuting/splitting the partition order.
    def __init__( self : int , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : Optional[int]=None , ):
        # NOTE(review): originally `self.df = df` etc.; the attribute stores
        # were lost in obfuscation, while reads below use `self.df`,
        # `self.partition_order`, `self.generate_examples_fn`.
        lowercase_ : Dict = df
        lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
        lowercase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order )

    def __iter__( self : List[Any] ):
        yield from self.generate_examples_fn()

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.random.Generator ):
        # Shuffle: permute partition order with the provided RNG.
        lowercase_ : str = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(lowercase_ )
        return SparkExamplesIterable(self.df , partition_order=lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ):
        # Shard for a given worker: keep only this worker's partition slice.
        lowercase_ : str = self.split_shard_indices_by_worker(lowercase_ , lowercase_ )
        return SparkExamplesIterable(self.df , partition_order=lowercase_ )

    @property
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # Number of shards == number of partitions in the current order.
        return len(self.partition_order )


class __magic_name__ ( datasets.DatasetBuilder):
    # DatasetBuilder that materializes a Spark DataFrame into Arrow/Parquet shards.
    UpperCamelCase__ = SparkConfig

    def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ):
        import pyspark

        # NOTE(review): originally stored self._spark / self.df / self._working_dir.
        lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
        lowercase_ : Optional[int] = df
        lowercase_ : List[str] = working_dir
        super().__init__(
            cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , )

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # Validate that cache_dir is reachable from workers on multi-node clusters.
        # Returns the path of the created file.
        def create_cache_and_write_probe(lowercase_ : str ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=lowercase_ )
            lowercase_ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(lowercase_ , """a""" )
            return [probe_file]

        if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            # Write a probe file from a worker, then check the driver can see it.
            lowercase_ : str = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect()
            )
            if os.path.isfile(probe[0] ):
                return

        raise ValueError(
            """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        return datasets.DatasetInfo(features=self.config.features )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ):
        # Single TRAIN split; the DataFrame itself is the data source.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]

    def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
        # Repartition the DataFrame so each partition stays under max_shard_size,
        # estimating bytes/row from a <=100-row Arrow sample.
        import pyspark

        def get_arrow_batch_size(lowercase_ : Any ):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )

        lowercase_ : Union[str, Any] = self.df.count()
        lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        lowercase_ : Any = (
            self.df.limit(lowercase_ )
            .repartition(1 )
            .mapInArrow(lowercase_ , """batch_bytes: long""" )
            .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) )
            lowercase_ : Any = self.df.repartition(lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
        # Worker-side shard writing: each Spark task writes its partition to
        # Arrow/Parquet shard files and yields per-task (examples, bytes, shards) stats.
        import pyspark

        lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter
        lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath
        lowercase_ : Optional[Any] = file_format == """parquet"""

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        lowercase_ : Tuple = self.config.features
        lowercase_ : Any = self._writer_batch_size
        lowercase_ : List[str] = self._fs.storage_options

        def write_arrow(lowercase_ : str ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId()
            lowercase_ : Dict = next(lowercase_ , lowercase_ )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
            lowercase_ : int = 0
            lowercase_ : List[Any] = writer_class(
                features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
            lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
            writer.write_table(lowercase_ )
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    # Current shard full: finalize it and roll over to a new one.
                    lowercase_ : Dict = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
                    shard_id += 1
                    lowercase_ : Any = writer_class(
                        features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
                lowercase_ : List[str] = pa.Table.from_batches([batch] )
                writer.write_table(lowercase_ )
            if writer._num_bytes > 0:
                lowercase_ : str = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )

            if working_fpath != fpath:
                # Move shards from the scratch working dir to the final location.
                # NOTE(review): `shutil` is not imported in this chunk — TODO confirm.
                for file in os.listdir(os.path.dirname(lowercase_ ) ):
                    lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) )
                    shutil.move(lowercase_ , lowercase_ )

        # Aggregate the per-task stats batches emitted by write_arrow.
        lowercase_ : Union[str, Any] = (
            self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" )
            .groupBy("""task_id""" )
            .agg(
                pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
        # Driver-side split preparation: run the distributed write, then rename
        # shard files into the final -SSSSS-of-NNNNN naming scheme.
        self._validate_cache_dir()

        lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(lowercase_ )
        lowercase_ : Tuple = not is_remote_filesystem(self._fs )
        lowercase_ : int = os.path.join if is_local else posixpath.join

        lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
        lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
        lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ )

        lowercase_ : Any = 0
        lowercase_ : Tuple = 0
        lowercase_ : int = 0
        lowercase_ : Dict = []
        lowercase_ : Union[str, Any] = []
        for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ):
            # NOTE(review): originally tuple-unpacked into
            # (num_examples, num_bytes, num_shards, shard_lengths).
            ( lowercase_ ) : Union[str, Any] = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(lowercase_ )

        lowercase_ : List[str] = total_num_examples
        lowercase_ : int = total_num_bytes

        # should rename everything at the end
        logger.debug(f'''Renaming {total_shards} shards.''' )
        if total_shards > 1:
            lowercase_ : Tuple = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            lowercase_ : Dict = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard( lowercase_ : int , lowercase_ : int , lowercase_ : int , ):
                rename(
                    lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )

            lowercase_ : Union[str, Any] = []
            lowercase_ : Tuple = 0
            for i in range(len(lowercase_ ) ):
                lowercase_ : List[Any] = task_id_and_num_shards[i]
                for shard_id in range(lowercase_ ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            # Rename in parallel, one Spark task per shard.
            self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect()
        else:
            # don't use any pattern
            lowercase_ : List[str] = 0
            lowercase_ : Optional[Any] = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ):
        # Streaming path: wrap the DataFrame in the examples iterable.
        return SparkExamplesIterable(self.df )
708
'''simple docstring''' import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] ) def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> Any: if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , UpperCAmelCase__ ) lowercase_ : List[Any] = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: lowercase_ : str = dataset_size < in_memory_max_size else: lowercase_ : List[Any] = False lowercase_ : Any = is_small_dataset(UpperCAmelCase__ ) assert result == expected
30
0
'''simple docstring'''

# CLI: convert an original CompVis/Stability Stable Diffusion checkpoint into
# the diffusers pipeline layout.
# NOTE(review): obfuscation renamed `parser`, `args` and `pipe` assignments to
# `_lowercase`, so the reads of those names below no longer match any
# assignment — flagged, code left byte-identical.
import argparse

import torch

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt


if __name__ == "__main__":
    _lowercase : Optional[Any] = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        "--original_config_file",
        default=None,
        type=str,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--scheduler_type",
        default="pndm",
        type=str,
        help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
    )
    parser.add_argument(
        "--pipeline_type",
        default=None,
        type=str,
        help=(
            "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
            ". If `None` pipeline will be automatically inferred."
        ),
    )
    parser.add_argument(
        "--image_size",
        default=None,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--prediction_type",
        default=None,
        type=str,
        help=(
            "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
            " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    parser.add_argument(
        "--stable_unclip",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
    )
    parser.add_argument(
        "--stable_unclip_prior",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
    )
    parser.add_argument(
        "--clip_stats_path",
        type=str,
        help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
        required=False,
    )
    parser.add_argument(
        "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
    )
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--vae_path",
        type=str,
        default=None,
        required=False,
        help="Set to a path, hub id to an already converted vae to not convert it again.",
    )
    _lowercase : Union[str, Any] = parser.parse_args()

    # Perform the actual conversion; every CLI flag maps 1:1 onto a kwarg.
    _lowercase : Tuple = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        # NOTE(review): `torch.floataa` was presumably `torch.float16` before
        # obfuscation — TODO confirm.
        pipe.to(torch_dtype=torch.floataa)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
709
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" ) lowercase_ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" ) lowercase_ : Union[str, Any] = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids lowercase_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids lowercase_ : Optional[Any] = model(lowercase_ , labels=lowercase_ ).loss lowercase_ : Optional[int] = -tf.math.reduce_mean(lowercase_ ).numpy() lowercase_ : Optional[int] = -21.22_81_68 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
30
0
'''simple docstring'''

# Top-level package init for `datasets`: environment checks, public re-exports,
# and deprecated aliases.
# NOTE(review): obfuscation renamed `__version__` and the deprecated alias
# targets to `_lowercase`, so each assignment below shadows the previous one —
# flagged, code left byte-identical.
_lowercase : Optional[int] = "2.13.1"

import platform

import pyarrow
from packaging import version


# Hard environment requirements: Python >= 3.7 and pyarrow >= 8.
if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

# Keep the package namespace clean of the probe imports.
del platform
del pyarrow
del version

# Public API re-exports.
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

# Backward-compatible aliases kept for deprecated import paths.
_lowercase : List[str] = concatenate_datasets
_lowercase : Any = DownloadConfig
_lowercase : List[Any] = DownloadManager
_lowercase : Union[str, Any] = DownloadMode
_lowercase : Any = DownloadConfig
_lowercase : str = DownloadMode
_lowercase : Tuple = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
710
'''simple docstring'''
from collections.abc import Callable

import numpy as np


def lowerCamelCase(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.array:
    """Solve y' = ode_func(x, y) with Heun's method (explicit trapezoidal rule,
    a.k.a. the modified-Euler scheme) on the interval [x0, x_end].

    Bug fix: the mangled original declared all five parameters with the same
    name (`UpperCAmelCase__`), which is a SyntaxError in Python, while the body
    referenced `xa`/`ya`/`step_size`/`x_end`; parameter names are restored to
    match the body's usage.

    Args:
        ode_func: right-hand side f(x, y) of the ODE.
        y0: initial value y(x0).
        x0: left endpoint of the integration interval.
        step_size: fixed step h > 0.
        x_end: right endpoint of the integration interval.

    Returns:
        np.ndarray of approximated solution values; y[0] == y0.

    >>> f = lambda x, y: y
    >>> y = lowerCamelCase(f, 1.0, 0.0, 0.2, 0.2)
    >>> round(float(y[-1]), 6)
    1.22
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Predictor: forward-Euler estimate of y(x + h).
        y_pred = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at both ends of the step (trapezoidal rule).
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
30
0
# Perceiver model configuration + ONNX export config (HF `transformers` style;
# `__magic_name__` ~ PerceiverConfig and PerceiverOnnxConfig — the second class
# definition shadows the first because both got the same mangled name).
# NOTE(review): the assignments at the end of the first and second physical
# lines below ("lowercase_ : Any = " / "lowercase_ : int = ") are split
# mid-statement across the line breaks, so this chunk is not runnable as-is;
# kept verbatim rather than restyled, since rejoining would be guesswork.
# NOTE(review): inside `__init__` every config attribute was mangled to the
# same local name `lowercase_`, so the original attribute bindings
# (num_latents, d_latents, d_model, ...) are no longer actually stored.
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging _lowercase : List[str] = logging.get_logger(__name__) _lowercase : Union[str, Any] = { "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json", # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''perceiver''' def __init__( self : str , lowercase_ : Union[str, Any]=256 , lowercase_ : Dict=1280 , lowercase_ : List[str]=768 , lowercase_ : int=1 , lowercase_ : Any=26 , lowercase_ : Tuple=8 , lowercase_ : Any=8 , lowercase_ : int=None , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]="kv" , lowercase_ : Optional[int]=1 , lowercase_ : str=1 , lowercase_ : Tuple="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=1E-12 , lowercase_ : List[Any]=True , lowercase_ : List[str]=262 , lowercase_ : Tuple=2048 , lowercase_ : int=56 , lowercase_ : int=[368, 496] , lowercase_ : Tuple=16 , lowercase_ : List[Any]=1920 , lowercase_ : Optional[Any]=16 , lowercase_ : Tuple=[1, 16, 224, 224] , **lowercase_ : Tuple , ): super().__init__(**lowercase_ ) lowercase_ : Any = num_latents lowercase_ : Dict = d_latents lowercase_ : Tuple = d_model lowercase_ : List[Any] = num_blocks lowercase_ : Optional[Any] = num_self_attends_per_block lowercase_ : Tuple = num_self_attention_heads lowercase_ : Any = num_cross_attention_heads lowercase_ : Union[str, Any] = qk_channels lowercase_ : Union[str, Any] = v_channels lowercase_ : Optional[Any] = cross_attention_shape_for_attention lowercase_ : Any = 
self_attention_widening_factor lowercase_ : int = cross_attention_widening_factor lowercase_ : Optional[int] = hidden_act lowercase_ : Any = attention_probs_dropout_prob lowercase_ : int = initializer_range lowercase_ : int = layer_norm_eps lowercase_ : Optional[int] = use_query_residual # masked language modeling attributes lowercase_ : Dict = vocab_size lowercase_ : Dict = max_position_embeddings # image classification attributes lowercase_ : Any = image_size # flow attributes lowercase_ : str = train_size # multimodal autoencoding attributes lowercase_ : Dict = num_frames lowercase_ : Optional[Any] = audio_samples_per_frame lowercase_ : Optional[Any] = samples_per_patch lowercase_ : int = output_shape class __magic_name__ ( _UpperCAmelCase): @property def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): if self.task == "multiple-choice": lowercase_ : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowercase_ : int = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""inputs""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] ) @property def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): return 1E-4 def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , lowercase_ : int = 3 , lowercase_ : int = 40 , lowercase_ : int = 40 , ): # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(lowercase_ , lowercase_ ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowercase_ : Dict = compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowercase_ : int = 
preprocessor.num_special_tokens_to_add(lowercase_ ) lowercase_ : Optional[Any] = compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ ) # Generate dummy inputs according to compute batch and sequence lowercase_ : Optional[int] = [""" """.join(["""a"""] ) * seq_length] * batch_size lowercase_ : List[Any] = dict(preprocessor(lowercase_ , return_tensors=lowercase_ ) ) lowercase_ : List[Any] = inputs.pop("""input_ids""" ) return inputs elif isinstance(lowercase_ , lowercase_ ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowercase_ : int = compute_effective_axis_dimension(lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch ) lowercase_ : Union[str, Any] = self._generate_dummy_images(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase_ : List[Any] = dict(preprocessor(images=lowercase_ , return_tensors=lowercase_ ) ) lowercase_ : str = inputs.pop("""pixel_values""" ) return inputs else: raise ValueError( """Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
711
# Second-order IIR biquad factory functions in the style of the Audio EQ
# Cookbook (presumably lowpass / highpass / bandpass / allpass / peaking-EQ /
# low-shelf / high-shelf — TODO confirm against the original source). The
# mangling gave every function the same name `lowerCamelCase`, so later
# definitions shadow earlier ones, and collapsed the biquad coefficients
# a0/a1/a2 and b0/b1/b2 into the reused names `aa`/`ba`: calls like
# `filt.set_coefficients([aa, aa, aa] , [ba, ba, ba])` now pass one value
# three times instead of three distinct coefficients.
# NOTE(review): kept verbatim — a faithful restyle is not possible without
# guessing which assignment each `aa`/`ba` originally was. Also note the last
# statement of the first physical line is split across the line break below.
'''simple docstring''' from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter: lowercase_ : str = tau * frequency / samplerate lowercase_ : Tuple = sin(UpperCAmelCase__ ) lowercase_ : int = cos(UpperCAmelCase__ ) lowercase_ : Any = _sin / (2 * q_factor) lowercase_ : Dict = (1 - _cos) / 2 lowercase_ : Optional[int] = 1 - _cos lowercase_ : Dict = 1 + alpha lowercase_ : List[Any] = -2 * _cos lowercase_ : Union[str, Any] = 1 - alpha lowercase_ : List[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter: lowercase_ : str = tau * frequency / samplerate lowercase_ : Optional[int] = sin(UpperCAmelCase__ ) lowercase_ : Dict = cos(UpperCAmelCase__ ) lowercase_ : Optional[int] = _sin / (2 * q_factor) lowercase_ : Dict = (1 + _cos) / 2 lowercase_ : str = -1 - _cos lowercase_ : Dict = 1 + alpha lowercase_ : Optional[Any] = -2 * _cos lowercase_ : List[Any] = 1 - alpha lowercase_ : Union[str, Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter: lowercase_ : int = tau * frequency / samplerate lowercase_ : int = sin(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = cos(UpperCAmelCase__ ) lowercase_ : str = _sin / (2 * q_factor) lowercase_ : str = _sin / 2 lowercase_ : Any = 0 lowercase_ : Optional[Any] = -ba lowercase_ : Dict = 1 + alpha lowercase_ : Union[str, Any] = -2 * _cos lowercase_ : Union[str, Any] = 1 - alpha lowercase_ : Tuple = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / 
sqrt(2 ) ) -> IIRFilter: lowercase_ : List[str] = tau * frequency / samplerate lowercase_ : Any = sin(UpperCAmelCase__ ) lowercase_ : List[Any] = cos(UpperCAmelCase__ ) lowercase_ : Optional[Any] = _sin / (2 * q_factor) lowercase_ : Any = 1 - alpha lowercase_ : Optional[Any] = -2 * _cos lowercase_ : Optional[int] = 1 + alpha lowercase_ : Dict = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter: lowercase_ : Dict = tau * frequency / samplerate lowercase_ : Tuple = sin(UpperCAmelCase__ ) lowercase_ : List[Any] = cos(UpperCAmelCase__ ) lowercase_ : List[Any] = _sin / (2 * q_factor) lowercase_ : Any = 10 ** (gain_db / 40) lowercase_ : List[str] = 1 + alpha * big_a lowercase_ : List[Any] = -2 * _cos lowercase_ : Dict = 1 - alpha * big_a lowercase_ : str = 1 + alpha / big_a lowercase_ : List[str] = -2 * _cos lowercase_ : Tuple = 1 - alpha / big_a lowercase_ : Any = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter: lowercase_ : Dict = tau * frequency / samplerate lowercase_ : Union[str, Any] = sin(UpperCAmelCase__ ) lowercase_ : Any = cos(UpperCAmelCase__ ) lowercase_ : Any = _sin / (2 * q_factor) lowercase_ : Any = 10 ** (gain_db / 40) lowercase_ : Any = (big_a + 1) - (big_a - 1) * _cos lowercase_ : int = (big_a + 1) + (big_a - 1) * _cos lowercase_ : Tuple = (big_a - 1) - (big_a + 1) * _cos lowercase_ : Optional[Any] = (big_a - 1) + (big_a + 1) * _cos lowercase_ : int = 2 * sqrt(UpperCAmelCase__ ) * alpha lowercase_ : Tuple = big_a * (pmc + aaa) lowercase_ : List[str] = 2 * big_a * mpc lowercase_ : Union[str, Any] = big_a * (pmc - aaa) lowercase_ : Optional[int] = ppmc + aaa lowercase_ : Optional[int] = -2 * 
pmpc lowercase_ : Any = ppmc - aaa lowercase_ : Optional[int] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter: lowercase_ : str = tau * frequency / samplerate lowercase_ : int = sin(UpperCAmelCase__ ) lowercase_ : int = cos(UpperCAmelCase__ ) lowercase_ : Dict = _sin / (2 * q_factor) lowercase_ : Union[str, Any] = 10 ** (gain_db / 40) lowercase_ : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos lowercase_ : Optional[int] = (big_a + 1) + (big_a - 1) * _cos lowercase_ : Any = (big_a - 1) - (big_a + 1) * _cos lowercase_ : str = (big_a - 1) + (big_a + 1) * _cos lowercase_ : Optional[int] = 2 * sqrt(UpperCAmelCase__ ) * alpha lowercase_ : Tuple = big_a * (ppmc + aaa) lowercase_ : List[Any] = -2 * big_a * pmpc lowercase_ : Optional[Any] = big_a * (ppmc - aaa) lowercase_ : Optional[Any] = pmc + aaa lowercase_ : int = 2 * mpc lowercase_ : Tuple = pmc - aaa lowercase_ : Union[str, Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
30
0
# ViTMAE model test suite (HF `transformers` test style): a model tester
# helper class, a common-test mixin subclass, and a slow integration test.
# NOTE(review): mangled single-line dump — every local was renamed to
# `lowercase_`, destroying tuple unpackings (e.g.
# `lowercase_ : Union[str, Any] = config_and_inputs` was presumably
# `config, pixel_values, labels = config_and_inputs`), and several statements
# are split mid-token across the physical line breaks below, so this chunk is
# not runnable as-is. Kept verbatim rather than restyled: reconstructing the
# original bindings would be guesswork.
'''simple docstring''' import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __magic_name__ : def __init__( self : Any , lowercase_ : Dict , lowercase_ : Dict=13 , lowercase_ : int=30 , lowercase_ : List[Any]=2 , lowercase_ : List[str]=3 , lowercase_ : Tuple=True , lowercase_ : List[str]=True , lowercase_ : Tuple=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : Tuple=4 , lowercase_ : Optional[int]=37 , lowercase_ : int="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Optional[Any]=10 , lowercase_ : Optional[Any]=0.02 , lowercase_ : List[Any]=3 , lowercase_ : str=0.6 , lowercase_ : List[str]=None , ): lowercase_ : Union[str, Any] = parent lowercase_ : Any = batch_size lowercase_ : Optional[Any] = image_size lowercase_ : str = patch_size lowercase_ : Union[str, Any] = num_channels lowercase_ : List[str] = is_training lowercase_ : Tuple = use_labels lowercase_ : str = hidden_size lowercase_ : Optional[Any] = num_hidden_layers lowercase_ : List[str] = num_attention_heads lowercase_ : str = intermediate_size lowercase_ : List[str] = hidden_act lowercase_ : Tuple = hidden_dropout_prob lowercase_ : List[str] = attention_probs_dropout_prob lowercase_ : Union[str, Any] = type_sequence_label_size lowercase_ : int = 
initializer_range lowercase_ : List[Any] = mask_ratio lowercase_ : Optional[int] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowercase_ : Union[str, Any] = (image_size // patch_size) ** 2 lowercase_ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ : Union[str, Any] = None if self.use_labels: lowercase_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ : List[Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : Tuple ): return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : str ): lowercase_ : Optional[Any] = ViTMAEModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : List[str] = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : int ): lowercase_ : List[str] = ViTMAEForPreTraining(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Union[str, Any] = model(lowercase_ ) lowercase_ : Tuple = (self.image_size // self.patch_size) ** 2 
lowercase_ : Tuple = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowercase_ : Any = 1 lowercase_ : int = ViTMAEForPreTraining(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase_ : Optional[int] = model(lowercase_ ) lowercase_ : List[str] = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : List[Any] = self.prepare_config_and_inputs() lowercase_ : Union[str, Any] = config_and_inputs lowercase_ : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () UpperCamelCase__ = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {} UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : str = ViTMAEModelTester(self ) lowercase_ : Any = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): pass def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Optional[int] = model_class(lowercase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase_ : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) 
) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Tuple = model_class(lowercase_ ) lowercase_ : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : Dict = [*signature.parameters.keys()] lowercase_ : Optional[int] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : str ): # make masks reproducible np.random.seed(2 ) lowercase_ : List[str] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) lowercase_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase_ : Union[str, Any] = torch.from_numpy(lowercase_ ) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowercase_ : Optional[Any] = pt_noise super().check_pt_tf_models(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Union[str, Any] = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : List[str] = outputs[0].cpu().numpy() lowercase_ : int = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase_ ) lowercase_ : Any = model_class.from_pretrained(lowercase_ ) model.to(lowercase_ ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowercase_ : Any = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) # Make sure we don't have nans lowercase_ : int = after_outputs[0].cpu().numpy() lowercase_ : Optional[int] = 0 lowercase_ : List[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-5 ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def SCREAMING_SNAKE_CASE_ ( self : int ): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def SCREAMING_SNAKE_CASE_ ( self : str ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load""" ) def SCREAMING_SNAKE_CASE_ ( self : Any ): pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): pass @slow def SCREAMING_SNAKE_CASE_ ( self : int ): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : str = ViTMAEModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def lowerCamelCase ( ) -> Optional[int]: lowercase_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowercase_ : Dict = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(lowercase_ ) lowercase_ : Tuple = self.default_image_processor lowercase_ : int = prepare_img() lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowercase_ : Union[str, Any] = ViTMAEConfig() lowercase_ : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowercase_ : Union[str, Any] = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): lowercase_ : Any = model(**lowercase_ , noise=torch.from_numpy(lowercase_ ).to(device=lowercase_ ) ) # verify the logits lowercase_ : List[Any] = torch.Size((1, 196, 768) ) self.assertEqual(outputs.logits.shape , lowercase_ ) lowercase_ : str = torch.tensor( [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, 
-0.35_24]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowercase_ ) , atol=1E-4 ) )
712
# Spark-backed `datasets` builder: an examples iterable over Spark DataFrame
# partitions plus a DatasetBuilder that writes Arrow/Parquet shards from Spark
# tasks and renames them into the -SSSSS-of-NNNNN shard layout.
# NOTE(review): `shutil.move` is called inside the shard-writer below but
# `shutil` is never imported in this chunk — confirm the import exists
# elsewhere in the full file or add it.
# NOTE(review): mangled single-line dump — locals collapsed to `lowercase_`
# (tuple unpackings such as `lowercase_ , lowercase_ : Dict =
# writer.finalize()` lost their original names) and several statements are
# split mid-token across the physical line breaks; kept verbatim rather than
# restyled.
'''simple docstring''' import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int _lowercase : str = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class __magic_name__ ( datasets.BuilderConfig): UpperCamelCase__ = None def lowerCamelCase ( UpperCAmelCase__ : "pyspark.sql.DataFrame" , UpperCAmelCase__ : List[int] , ) -> str: import pyspark def generate_fn(): lowercase_ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) ) for partition_id in partition_order: lowercase_ : int = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" ) lowercase_ : Any = partition_df.collect() lowercase_ : Dict = 0 for row in rows: yield F'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class __magic_name__ ( _BaseExamplesIterable): def __init__( self : int , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : Optional[int]=None , ): lowercase_ : Dict = df lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() ) lowercase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : List[Any] ): yield from self.generate_examples_fn() def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.random.Generator ): lowercase_ : str = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(lowercase_ ) return SparkExamplesIterable(self.df , partition_order=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , 
lowercase_ : int , lowercase_ : int ): lowercase_ : str = self.split_shard_indices_by_worker(lowercase_ , lowercase_ ) return SparkExamplesIterable(self.df , partition_order=lowercase_ ) @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): return len(self.partition_order ) class __magic_name__ ( datasets.DatasetBuilder): UpperCamelCase__ = SparkConfig def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ): import pyspark lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate() lowercase_ : Optional[int] = df lowercase_ : List[str] = working_dir super().__init__( cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , ) def SCREAMING_SNAKE_CASE_ ( self : str ): # Returns the path of the created file. def create_cache_and_write_probe(lowercase_ : str ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=lowercase_ ) lowercase_ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(lowercase_ , """a""" ) return [probe_file] if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: lowercase_ : str = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ): import pyspark def get_arrow_batch_size(lowercase_ : Any ): for batch in it: yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} ) lowercase_ : Union[str, Any] = self.df.count() lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. lowercase_ : Any = ( self.df.limit(lowercase_ ) .repartition(1 ) .mapInArrow(lowercase_ , """batch_bytes: long""" ) .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) ) .collect()[0] .sample_bytes / sample_num_rows ) lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. 
lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) ) lowercase_ : Any = self.df.repartition(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ): import pyspark lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath lowercase_ : Optional[Any] = file_format == """parquet""" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. lowercase_ : Tuple = self.config.features lowercase_ : Any = self._writer_batch_size lowercase_ : List[str] = self._fs.storage_options def write_arrow(lowercase_ : str ): # Within the same SparkContext, no two task attempts will share the same attempt ID. lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId() lowercase_ : Dict = next(lowercase_ , lowercase_ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) lowercase_ : int = 0 lowercase_ : List[Any] = writer_class( features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , ) lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] ) writer.write_table(lowercase_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: lowercase_ , lowercase_ : Dict = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) shard_id += 1 lowercase_ : Any = writer_class( features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , ) lowercase_ : List[str] = pa.Table.from_batches([batch] ) writer.write_table(lowercase_ ) if writer._num_bytes > 0: lowercase_ , lowercase_ : str = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(lowercase_ ) ): lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) ) shutil.move(lowercase_ , lowercase_ ) lowercase_ : Union[str, Any] = ( self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" ) .groupBy("""task_id""" ) .agg( pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" 
).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ): self._validate_cache_dir() lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(lowercase_ ) lowercase_ : Tuple = not is_remote_filesystem(self._fs ) lowercase_ : int = os.path.join if is_local else posixpath.join lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN""" lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ ) lowercase_ : Any = 0 lowercase_ : Tuple = 0 lowercase_ : int = 0 lowercase_ : Dict = [] lowercase_ : Union[str, Any] = [] for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ): ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Union[str, Any] = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(lowercase_ ) lowercase_ : List[str] = total_num_examples lowercase_ : int = total_num_bytes # should rename everything at the end logger.debug(f'''Renaming {total_shards} shards.''' ) if total_shards > 1: lowercase_ : Tuple = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. 
lowercase_ : Dict = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( lowercase_ : int , lowercase_ : int , lowercase_ : int , ): rename( lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , ) lowercase_ : Union[str, Any] = [] lowercase_ : Tuple = 0 for i in range(len(lowercase_ ) ): lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i] for shard_id in range(lowercase_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect() else: # don't use any pattern lowercase_ : List[str] = 0 lowercase_ : Optional[Any] = task_id_and_num_shards[0][0] self._rename( fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ): return SparkExamplesIterable(self.df )
30
0
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=_UpperCAmelCase) class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = field(default='''image-classification''', metadata={'''include_in_asdict_even_if_is_default''': True}) UpperCamelCase__ = Features({'''image''': Image()}) UpperCamelCase__ = Features({'''labels''': ClassLabel}) UpperCamelCase__ = '''image''' UpperCamelCase__ = '''labels''' def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ): if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , lowercase_ ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) lowercase_ : List[str] = copy.deepcopy(self ) lowercase_ : List[str] = self.label_schema.copy() lowercase_ : List[Any] = features[self.label_column] lowercase_ : Optional[Any] = label_schema return task_template @property def SCREAMING_SNAKE_CASE_ ( self : int ): return { self.image_column: "image", self.label_column: "labels", }
713
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowercase : Dict = { "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Dict = ["BloomTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Dict = [ "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST", "BloomForCausalLM", "BloomModel", "BloomPreTrainedModel", "BloomForSequenceClassification", "BloomForTokenClassification", "BloomForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys _lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Optional[Any]="pt" ): lowercase_ : Optional[Any] = {"""add_prefix_space""": True} if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not line.startswith(""" """ ) else {} lowercase_ : List[str] = padding_side return tokenizer( [line] , max_length=UpperCAmelCase__ , padding="""max_length""" if pad_to_max_length else None , truncation=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , ) def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int]=None , ): lowercase_ : Union[str, Any] = input_ids.ne(UpperCAmelCase__ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __magic_name__ ( _UpperCAmelCase): def __init__( self : List[str] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Any="train" , lowercase_ : List[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]="" , ): super().__init__() lowercase_ : Union[str, Any] = Path(lowercase_ ).joinpath(type_path + """.source""" ) lowercase_ : Union[str, Any] = Path(lowercase_ ).joinpath(type_path + """.target""" ) lowercase_ : Any = self.get_char_lens(self.src_file ) lowercase_ : Tuple = max_source_length 
lowercase_ : str = max_target_length assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}''' lowercase_ : Optional[Any] = tokenizer lowercase_ : str = prefix if n_obs is not None: lowercase_ : Any = self.src_lens[:n_obs] lowercase_ : Tuple = src_lang lowercase_ : int = tgt_lang def __len__( self : Optional[Any] ): return len(self.src_lens ) def __getitem__( self : Optional[int] , lowercase_ : List[str] ): lowercase_ : List[str] = index + 1 # linecache starts at 1 lowercase_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) , lowercase_ ).rstrip("""\n""" ) lowercase_ : List[Any] = linecache.getline(str(self.tgt_file ) , lowercase_ ).rstrip("""\n""" ) assert source_line, f'''empty source line for index {index}''' assert tgt_line, f'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer , lowercase_ ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right lowercase_ : Optional[Any] = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , lowercase_ ) else self.tokenizer ) lowercase_ : Tuple = self.tokenizer.generator if isinstance(self.tokenizer , lowercase_ ) else self.tokenizer lowercase_ : str = encode_line(lowercase_ , lowercase_ , self.max_source_length , """right""" ) lowercase_ : Optional[int] = encode_line(lowercase_ , lowercase_ , self.max_target_length , """right""" ) lowercase_ : Any = source_inputs["""input_ids"""].squeeze() lowercase_ : Dict = target_inputs["""input_ids"""].squeeze() lowercase_ : int = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def SCREAMING_SNAKE_CASE_ ( lowercase_ : int ): return [len(lowercase_ ) for x in Path(lowercase_ ).open().readlines()] def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[str] ): lowercase_ : Any = torch.stack([x["""input_ids"""] 
for x in batch] ) lowercase_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] ) lowercase_ : Any = torch.stack([x["""decoder_input_ids"""] for x in batch] ) lowercase_ : int = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , lowercase_ ) else self.tokenizer.pad_token_id ) lowercase_ : Union[str, Any] = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , lowercase_ ) else self.tokenizer.pad_token_id ) lowercase_ : Tuple = trim_batch(lowercase_ , lowercase_ ) lowercase_ : List[str] = trim_batch(lowercase_ , lowercase_ , attention_mask=lowercase_ ) lowercase_ : str = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch _lowercase : Union[str, Any] = getLogger(__name__) def lowerCamelCase ( UpperCAmelCase__ : List[List] ): return list(itertools.chain.from_iterable(UpperCAmelCase__ ) ) def lowerCamelCase ( UpperCAmelCase__ : str ): lowercase_ : Dict = get_git_info() save_json(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , """git_log.json""" ) ) def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int=4 , **UpperCAmelCase__ : List[Any] ): with open(UpperCAmelCase__ , """w""" ) as f: json.dump(UpperCAmelCase__ , UpperCAmelCase__ , indent=UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : str ): with open(UpperCAmelCase__ ) as f: return json.load(UpperCAmelCase__ ) def lowerCamelCase ( ): lowercase_ : Union[str, Any] = git.Repo(search_parent_directories=UpperCAmelCase__ ) lowercase_ : str = { """repo_id""": str(UpperCAmelCase__ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def lowerCamelCase ( UpperCAmelCase__ : Callable , UpperCAmelCase__ : Iterable ): return list(map(UpperCAmelCase__ , UpperCAmelCase__ ) ) def lowerCamelCase ( UpperCAmelCase__ : 
List[str] , UpperCAmelCase__ : Optional[Any] ): with open(UpperCAmelCase__ , """wb""" ) as f: return pickle.dump(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : List[str] ): def remove_articles(UpperCAmelCase__ : List[str] ): return re.sub(R"""\b(a|an|the)\b""" , """ """ , UpperCAmelCase__ ) def white_space_fix(UpperCAmelCase__ : str ): return " ".join(text.split() ) def remove_punc(UpperCAmelCase__ : Tuple ): lowercase_ : List[Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(UpperCAmelCase__ : Optional[Any] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(UpperCAmelCase__ ) ) ) ) def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ): lowercase_ : Optional[int] = normalize_answer(UpperCAmelCase__ ).split() lowercase_ : Union[str, Any] = normalize_answer(UpperCAmelCase__ ).split() lowercase_ : Optional[Any] = Counter(UpperCAmelCase__ ) & Counter(UpperCAmelCase__ ) lowercase_ : List[str] = sum(common.values() ) if num_same == 0: return 0 lowercase_ : Dict = 1.0 * num_same / len(UpperCAmelCase__ ) lowercase_ : str = 1.0 * num_same / len(UpperCAmelCase__ ) lowercase_ : Any = (2 * precision * recall) / (precision + recall) return fa def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Any ): return normalize_answer(UpperCAmelCase__ ) == normalize_answer(UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ): assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) lowercase_ : str = 0 for hypo, pred in zip(UpperCAmelCase__ , UpperCAmelCase__ ): em += exact_match_score(UpperCAmelCase__ , UpperCAmelCase__ ) if len(UpperCAmelCase__ ) > 0: em /= len(UpperCAmelCase__ ) return {"em": em} def lowerCamelCase ( UpperCAmelCase__ : Any ): return model_prefix.startswith("""rag""" ) def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : 
List[str] ): lowercase_ : List[str] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead lowercase_ : List[Any] = """dropout_rate""" for p in extra_params: if getattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): if not hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) and not hasattr(UpperCAmelCase__ , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(UpperCAmelCase__ ) ) delattr(UpperCAmelCase__ , UpperCAmelCase__ ) continue lowercase_ : Dict = p if hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) else equivalent_param[p] setattr(UpperCAmelCase__ , UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) ) delattr(UpperCAmelCase__ , UpperCAmelCase__ ) return hparams, config
714
'''simple docstring''' _lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" def lowerCamelCase ( ) -> None: lowercase_ : List[Any] = input("""Enter message: """ ) lowercase_ : str = input("""Enter key [alphanumeric]: """ ) lowercase_ : List[Any] = input("""Encrypt/Decrypt [e/d]: """ ) if mode.lower().startswith("""e""" ): lowercase_ : List[str] = """encrypt""" lowercase_ : Optional[int] = encrypt_message(UpperCAmelCase__ , UpperCAmelCase__ ) elif mode.lower().startswith("""d""" ): lowercase_ : Any = """decrypt""" lowercase_ : Optional[Any] = decrypt_message(UpperCAmelCase__ , UpperCAmelCase__ ) print(F'''\n{mode.title()}ed message:''' ) print(UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str: return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """encrypt""" ) def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str: return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """decrypt""" ) def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str: lowercase_ : Union[str, Any] = [] lowercase_ : List[Any] = 0 lowercase_ : str = key.upper() for symbol in message: lowercase_ : Tuple = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(UpperCAmelCase__ ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(UpperCAmelCase__ ): lowercase_ : Any = 0 else: translated.append(UpperCAmelCase__ ) return "".join(UpperCAmelCase__ ) if __name__ == "__main__": main()
30
0
'''simple docstring''' import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput _lowercase : Tuple = "scheduler_config.json" class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = 1 UpperCamelCase__ = 2 UpperCamelCase__ = 3 UpperCamelCase__ = 4 UpperCamelCase__ = 5 UpperCamelCase__ = 6 UpperCamelCase__ = 7 UpperCamelCase__ = 8 UpperCamelCase__ = 9 UpperCamelCase__ = 10 UpperCamelCase__ = 11 UpperCamelCase__ = 12 UpperCamelCase__ = 13 UpperCamelCase__ = 14 @dataclass class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = 42 class __magic_name__ : UpperCamelCase__ = SCHEDULER_CONFIG_NAME UpperCamelCase__ = [] UpperCamelCase__ = True @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , lowercase_ : Dict[str, Any] = None , lowercase_ : Optional[str] = None , lowercase_ : Optional[int]=False , **lowercase_ : int , ): lowercase_ : Tuple = cls.load_config( pretrained_model_name_or_path=lowercase_ , subfolder=lowercase_ , return_unused_kwargs=lowercase_ , return_commit_hash=lowercase_ , **lowercase_ , ) return cls.from_config(lowercase_ , return_unused_kwargs=lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, os.PathLike] , lowercase_ : bool = False , **lowercase_ : str ): self.save_config(save_directory=lowercase_ , push_to_hub=lowercase_ , **lowercase_ ) @property def SCREAMING_SNAKE_CASE_ ( self : Dict ): return self._get_compatibles() @classmethod def SCREAMING_SNAKE_CASE_ ( cls : int ): lowercase_ : str = list(set([cls.__name__] + cls._compatibles ) ) lowercase_ : Optional[Any] = importlib.import_module(__name__.split(""".""" )[0] ) lowercase_ : Dict = [ getattr(lowercase_ , lowercase_ ) for c in compatible_classes_str if hasattr(lowercase_ , lowercase_ ) ] return compatible_classes
715
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : List[Any] = """ylacombe/bark-small""" lowercase_ : List[str] = tempfile.mkdtemp() lowercase_ : Tuple = """en_speaker_1""" lowercase_ : Union[str, Any] = """This is a test string""" lowercase_ : int = """speaker_embeddings_path.json""" lowercase_ : Any = """speaker_embeddings""" def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : Optional[int] ): return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Any = self.get_tokenizer() lowercase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ ) processor.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Any = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) lowercase_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowercase_ : Optional[Any] = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : 
Optional[int] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) lowercase_ : Optional[int] = 35 lowercase_ : int = 2 lowercase_ : Union[str, Any] = 8 lowercase_ : Union[str, Any] = { """semantic_prompt""": np.ones(lowercase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset lowercase_ : str = processor(text=self.input_string , voice_preset=lowercase_ ) lowercase_ : Dict = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file lowercase_ : Any = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(lowercase_ , **lowercase_ ) lowercase_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowercase_ ) lowercase_ : List[Any] = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub lowercase_ : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : List[str] = self.get_tokenizer() lowercase_ : int = BarkProcessor(tokenizer=lowercase_ ) lowercase_ : Any = processor(text=self.input_string ) lowercase_ : List[str] = tokenizer( self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
30
0
'''simple docstring''' def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> str: return "\n".join( F'''{number} * {i} = {number * i}''' for i in range(1 , number_of_terms + 1 ) ) if __name__ == "__main__": print(multiplication_table(number=5, number_of_terms=10))
716
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=_UpperCAmelCase) class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = field(default='''image-classification''', metadata={'''include_in_asdict_even_if_is_default''': True}) UpperCamelCase__ = Features({'''image''': Image()}) UpperCamelCase__ = Features({'''labels''': ClassLabel}) UpperCamelCase__ = "image" UpperCamelCase__ = "labels" def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ): if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , lowercase_ ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) lowercase_ : List[str] = copy.deepcopy(self ) lowercase_ : List[str] = self.label_schema.copy() lowercase_ : List[Any] = features[self.label_column] lowercase_ : Optional[Any] = label_schema return task_template @property def SCREAMING_SNAKE_CASE_ ( self : int ): return { self.image_column: "image", self.label_column: "labels", }
30
0
'''simple docstring''' def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> Optional[Any]: lowercase_ : Optional[int] = [False] * len(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = [-1] * len(UpperCAmelCase__ ) def dfs(UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any] ): lowercase_ : List[str] = True lowercase_ : str = c for u in graph[v]: if not visited[u]: dfs(UpperCAmelCase__ , 1 - c ) for i in range(len(UpperCAmelCase__ ) ): if not visited[i]: dfs(UpperCAmelCase__ , 0 ) for i in range(len(UpperCAmelCase__ ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph _lowercase : List[str] = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
717
'''simple docstring''' import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ) -> List[Any]: lowercase_ : str = 1.5 lowercase_ : List[Any] = int(factor * num_class_images ) lowercase_ : int = ClipClient( url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 ) os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCAmelCase__ ) if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images: return while True: lowercase_ : List[str] = client.query(text=UpperCAmelCase__ ) if len(UpperCAmelCase__ ) >= factor * num_class_images or num_images > 1e4: break else: lowercase_ : List[str] = int(factor * num_images ) lowercase_ : List[str] = ClipClient( url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 , ) lowercase_ : List[str] = 0 lowercase_ : Dict = 0 lowercase_ : Tuple = tqdm(desc="""downloading real regularization images""" , total=UpperCAmelCase__ ) with open(F'''{class_data_dir}/caption.txt''' , """w""" ) as fa, open(F'''{class_data_dir}/urls.txt''' , """w""" ) as fa, open( F'''{class_data_dir}/images.txt''' , """w""" ) as fa: while total < num_class_images: lowercase_ : str = class_images[count] count += 1 try: lowercase_ : Union[str, Any] = requests.get(images["""url"""] ) if img.status_code == 200: lowercase_ : List[str] = Image.open(BytesIO(img.content ) ) with open(F'''{class_data_dir}/images/{total}.jpg''' , """wb""" ) as f: f.write(img.content ) fa.write(images["""caption"""] + """\n""" ) fa.write(images["""url"""] + """\n""" ) fa.write(F'''{class_data_dir}/images/{total}.jpg''' + """\n""" ) total += 1 pbar.update(1 ) else: continue except Exception: 
continue return def lowerCamelCase ( ) -> Optional[Any]: lowercase_ : Any = argparse.ArgumentParser("""""" , add_help=UpperCAmelCase__ ) parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ ) parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ ) parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=UpperCAmelCase__ ) return parser.parse_args() if __name__ == "__main__": _lowercase : Dict = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
30
0
'''simple docstring''' def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int: while a != 0: lowercase_ : Any = b % a, a return b def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int: if gcd(UpperCAmelCase__ , UpperCAmelCase__ ) != 1: lowercase_ : Optional[Any] = F'''mod inverse of {a!r} and {m!r} does not exist''' raise ValueError(UpperCAmelCase__ ) lowercase_ : str = 1, 0, a lowercase_ : Tuple = 0, 1, m while va != 0: lowercase_ : List[Any] = ua // va lowercase_ : int = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
718
'''simple docstring''' from __future__ import annotations def lowerCamelCase ( UpperCAmelCase__ : list , UpperCAmelCase__ : int | None = None , UpperCAmelCase__ : int | None = None ) -> None: if start is None: lowercase_ : Any = 0 if end is None: lowercase_ : List[Any] = len(UpperCAmelCase__ ) - 1 if start >= end: return lowercase_ : Optional[int] = (start + end) // 2 slowsort(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) slowsort(UpperCAmelCase__ , mid + 1 , UpperCAmelCase__ ) if sequence[end] < sequence[mid]: lowercase_ , lowercase_ : Dict = sequence[mid], sequence[end] slowsort(UpperCAmelCase__ , UpperCAmelCase__ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
30
0
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class __magic_name__ ( nn.Module): UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 0.0 UpperCamelCase__ = 1 UpperCamelCase__ = 1 UpperCamelCase__ = True UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = jnp.floataa def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Union[str, Any] = [] lowercase_ : Union[str, Any] = [] for i in range(self.num_layers ): lowercase_ : Dict = self.in_channels if i == 0 else self.out_channels lowercase_ : Tuple = FlaxResnetBlockaD( in_channels=lowercase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowercase_ ) lowercase_ : Any = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowercase_ ) lowercase_ : Union[str, Any] = resnets lowercase_ : Any = attentions if self.add_downsample: lowercase_ : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : Optional[Any]=True ): lowercase_ : int = () for resnet, attn in zip(self.resnets , self.attentions ): lowercase_ : Optional[Any] = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ ) lowercase_ : Tuple = attn(lowercase_ , lowercase_ , deterministic=lowercase_ ) output_states += (hidden_states,) if self.add_downsample: lowercase_ : Dict = self.downsamplers_a(lowercase_ ) output_states += (hidden_states,) return hidden_states, 
output_states class __magic_name__ ( nn.Module): UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 0.0 UpperCamelCase__ = 1 UpperCamelCase__ = True UpperCamelCase__ = jnp.floataa def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : List[Any] = [] for i in range(self.num_layers ): lowercase_ : Union[str, Any] = self.in_channels if i == 0 else self.out_channels lowercase_ : str = FlaxResnetBlockaD( in_channels=lowercase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowercase_ ) lowercase_ : str = resnets if self.add_downsample: lowercase_ : Optional[int] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Dict , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=True ): lowercase_ : int = () for resnet in self.resnets: lowercase_ : str = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ ) output_states += (hidden_states,) if self.add_downsample: lowercase_ : List[str] = self.downsamplers_a(lowercase_ ) output_states += (hidden_states,) return hidden_states, output_states class __magic_name__ ( nn.Module): UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 0.0 UpperCamelCase__ = 1 UpperCamelCase__ = 1 UpperCamelCase__ = True UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = jnp.floataa def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Tuple = [] lowercase_ : str = [] for i in range(self.num_layers ): lowercase_ : List[str] = self.in_channels if (i == self.num_layers - 1) else self.out_channels lowercase_ : Union[str, Any] = self.prev_output_channel if i == 0 else self.out_channels lowercase_ : int = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowercase_ ) lowercase_ : str = FlaxTransformeraDModel( in_channels=self.out_channels , 
n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowercase_ ) lowercase_ : List[Any] = resnets lowercase_ : Dict = attentions if self.add_upsample: lowercase_ : str = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Any=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states lowercase_ : Dict = res_hidden_states_tuple[-1] lowercase_ : List[Any] = res_hidden_states_tuple[:-1] lowercase_ : int = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) lowercase_ : Tuple = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ ) lowercase_ : Union[str, Any] = attn(lowercase_ , lowercase_ , deterministic=lowercase_ ) if self.add_upsample: lowercase_ : Union[str, Any] = self.upsamplers_a(lowercase_ ) return hidden_states class __magic_name__ ( nn.Module): UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 0.0 UpperCamelCase__ = 1 UpperCamelCase__ = True UpperCamelCase__ = jnp.floataa def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Optional[int] = [] for i in range(self.num_layers ): lowercase_ : List[str] = self.in_channels if (i == self.num_layers - 1) else self.out_channels lowercase_ : List[Any] = self.prev_output_channel if i == 0 else self.out_channels lowercase_ : Union[str, Any] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowercase_ ) lowercase_ : Dict = resnets if self.add_upsample: lowercase_ : List[Any] = FlaxUpsampleaD(self.out_channels 
, dtype=self.dtype ) def __call__( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Dict=True ): for resnet in self.resnets: # pop res hidden states lowercase_ : str = res_hidden_states_tuple[-1] lowercase_ : Any = res_hidden_states_tuple[:-1] lowercase_ : Union[str, Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) lowercase_ : Union[str, Any] = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ ) if self.add_upsample: lowercase_ : Optional[Any] = self.upsamplers_a(lowercase_ ) return hidden_states class __magic_name__ ( nn.Module): UpperCamelCase__ = 42 UpperCamelCase__ = 0.0 UpperCamelCase__ = 1 UpperCamelCase__ = 1 UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = jnp.floataa def SCREAMING_SNAKE_CASE_ ( self : Tuple ): # there is always at least one resnet lowercase_ : int = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] lowercase_ : str = [] for _ in range(self.num_layers ): lowercase_ : Optional[int] = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowercase_ ) lowercase_ : Optional[Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowercase_ ) lowercase_ : str = resnets lowercase_ : int = attentions def __call__( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : List[Any]=True ): lowercase_ : Dict = self.resnets[0](lowercase_ , lowercase_ ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): lowercase_ : Optional[Any] = attn(lowercase_ , lowercase_ 
, deterministic=lowercase_ ) lowercase_ : Any = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ ) return hidden_states
719
'''simple docstring''' import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline _lowercase : Optional[Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False) parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not") parser.add_argument("--steps", default=None, type=int, help="Num inference steps") _lowercase : Dict = parser.parse_args() _lowercase : Dict = "cpu" _lowercase : str = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings" _lowercase : Any = "path-to-your-trained-model" _lowercase : str = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: _lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) _lowercase : Any = pipe.to(device) # to channels last _lowercase : Union[str, Any] = pipe.unet.to(memory_format=torch.channels_last) _lowercase : List[Any] = pipe.vae.to(memory_format=torch.channels_last) _lowercase : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: _lowercase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex _lowercase : int = torch.randn(2, 4, 64, 64) _lowercase : int = torch.rand(1) * 999 _lowercase : Union[str, Any] = torch.randn(2, 77, 768) _lowercase : Optional[Any] = (sample, timestep, encoder_hidden_status) try: _lowercase : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: _lowercase : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) _lowercase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) _lowercase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: _lowercase : int = 
ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute _lowercase : int = 666 _lowercase : Any = torch.Generator(device).manual_seed(seed) _lowercase : int = {"generator": generator} if args.steps is not None: _lowercase : Optional[int] = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): _lowercase : List[Any] = pipe(prompt, **generate_kwargs).images[0] # save image image.save("generated.png")
30
0
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor from transformers.testing_utils import require_torch, slow from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin _lowercase : Optional[int] = random.Random() def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : int=1.0 , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : List[Any]=None ) -> List[str]: if rng is None: lowercase_ : str = global_rng lowercase_ : Union[str, Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class __magic_name__ ( unittest.TestCase): def __init__( self : int , lowercase_ : Union[str, Any] , lowercase_ : int=7 , lowercase_ : List[Any]=400 , lowercase_ : List[Any]=2000 , lowercase_ : Union[str, Any]=1 , lowercase_ : Any=0.0 , lowercase_ : Any=16000 , lowercase_ : Optional[Any]=True , lowercase_ : Tuple=True , ): lowercase_ : List[str] = parent lowercase_ : int = batch_size lowercase_ : str = min_seq_length lowercase_ : int = max_seq_length lowercase_ : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowercase_ : str = feature_size lowercase_ : Optional[Any] = padding_value lowercase_ : Dict = sampling_rate lowercase_ : List[str] = return_attention_mask lowercase_ : str = do_normalize def SCREAMING_SNAKE_CASE_ ( self : str ): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[Any]=False , lowercase_ : str=False ): def _flatten(lowercase_ : str ): return list(itertools.chain(*lowercase_ ) ) if equal_length: lowercase_ : str = floats_list((self.batch_size, 
self.max_seq_length) ) else: # make sure that inputs increase in size lowercase_ : Optional[Any] = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: lowercase_ : List[Any] = [np.asarray(lowercase_ ) for x in speech_inputs] return speech_inputs class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = WavaVecaFeatureExtractor def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = WavaVecaFeatureExtractionTester(self ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ): self.assertTrue(np.all(np.mean(lowercase_ , axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowercase_ , axis=0 ) - 1 ) < 1E-3 ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): # Tests that all call wrap to encode_plus and batch_encode_plus lowercase_ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowercase_ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase_ : Dict = [np.asarray(lowercase_ ) for speech_input in speech_inputs] # Test not batched input lowercase_ : str = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values lowercase_ : List[str] = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) ) # Test batched lowercase_ : List[str] = feat_extract(lowercase_ , return_tensors="""np""" ).input_values lowercase_ : Union[str, Any] = feat_extract(lowercase_ , return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ): self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
lowercase_ : Union[str, Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)] lowercase_ : str = np.asarray(lowercase_ ) lowercase_ : Any = feat_extract(lowercase_ , return_tensors="""np""" ).input_values lowercase_ : Any = feat_extract(lowercase_ , return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ): self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase_ : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase_ : List[str] = ["""longest""", """max_length""", """do_not_pad"""] lowercase_ : Optional[int] = [None, 1600, None] for max_length, padding in zip(lowercase_ , lowercase_ ): lowercase_ : Union[str, Any] = feat_extract(lowercase_ , padding=lowercase_ , max_length=lowercase_ , return_tensors="""np""" ) lowercase_ : Optional[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase_ : List[str] = range(800 , 1400 , 200 ) lowercase_ : Any = [floats_list((1, x) )[0] for x in lengths] lowercase_ : List[str] = ["""longest""", """max_length""", """do_not_pad"""] lowercase_ : Union[str, Any] = [None, 1600, None] for max_length, padding in zip(lowercase_ , lowercase_ ): lowercase_ : Optional[Any] = feat_extract(lowercase_ , max_length=lowercase_ , padding=lowercase_ ) lowercase_ : Union[str, Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) 
self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase_ : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase_ : Any = feat_extract( lowercase_ , truncation=lowercase_ , max_length=1000 , padding="""max_length""" , return_tensors="""np""" ) lowercase_ : List[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase_ : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase_ : Tuple = feat_extract( lowercase_ , truncation=lowercase_ , max_length=1000 , padding="""longest""" , return_tensors="""np""" ) lowercase_ : Optional[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) lowercase_ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase_ : Optional[int] = feat_extract( lowercase_ , truncation=lowercase_ , max_length=2000 , padding="""longest""" , return_tensors="""np""" ) lowercase_ : Dict = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200) ) 
@require_torch def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): import torch lowercase_ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase_ : Any = np.random.rand(100 ).astype(np.floataa ) lowercase_ : List[str] = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: lowercase_ : List[str] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) lowercase_ : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) @slow @require_torch def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): # this test makes sure that models that are using # group norm don't have their feature extractor return the # attention_mask for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST: lowercase_ : Any = WavaVecaConfig.from_pretrained(lowercase_ ) lowercase_ : Dict = WavaVecaFeatureExtractor.from_pretrained(lowercase_ ) # only "layer" feature extraction norm should make use of # attention_mask self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == """layer""" )
720
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _lowercase : Optional[Any] = { "configuration_swiftformer": [ "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwiftFormerConfig", "SwiftFormerOnnxConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = [ "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "SwiftFormerForImageClassification", "SwiftFormerModel", "SwiftFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys _lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
import colorsys from PIL import Image # type: ignore def lowerCamelCase ( UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : int ) -> float: lowercase_ : Union[str, Any] = x lowercase_ : int = y for step in range(UpperCAmelCase__ ): # noqa: B007 lowercase_ : Tuple = a * a - b * b + x lowercase_ : int = 2 * a * b + y lowercase_ : List[str] = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple: if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple: if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(UpperCAmelCase__ , 1 , 1 ) ) def lowerCamelCase ( UpperCAmelCase__ : int = 800 , UpperCAmelCase__ : int = 600 , UpperCAmelCase__ : float = -0.6 , UpperCAmelCase__ : float = 0 , UpperCAmelCase__ : float = 3.2 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : bool = True , ) -> Image.Image: lowercase_ : str = Image.new("""RGB""" , (image_width, image_height) ) lowercase_ : int = img.load() # loop through the image-coordinates for image_x in range(UpperCAmelCase__ ): for image_y in range(UpperCAmelCase__ ): # determine the figure-coordinates based on the image-coordinates lowercase_ : Dict = figure_width / image_width * image_height lowercase_ : Any = figure_center_x + (image_x / image_width - 0.5) * figure_width lowercase_ : Optional[int] = figure_center_y + (image_y / image_height - 0.5) * figure_height lowercase_ : List[Any] = get_distance(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: lowercase_ : Union[str, Any] = get_color_coded_rgb(UpperCAmelCase__ ) else: lowercase_ : Optional[Any] = get_black_and_white_rgb(UpperCAmelCase__ ) return img if __name__ == "__main__": import 
doctest doctest.testmod() # colored version, full figure _lowercase : Dict = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
721
'''simple docstring''' import unittest import numpy as np def lowerCamelCase ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray | None = None , ) -> np.ndarray: lowercase_ : List[Any] = np.shape(UpperCAmelCase__ ) lowercase_ : Dict = np.shape(UpperCAmelCase__ ) lowercase_ : int = np.shape(UpperCAmelCase__ ) if shape_a[0] != shape_b[0]: lowercase_ : Optional[int] = ( """Expected the same number of rows for A and B. """ F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(UpperCAmelCase__ ) if shape_b[1] != shape_c[1]: lowercase_ : Optional[Any] = ( """Expected the same number of columns for B and C. """ F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(UpperCAmelCase__ ) lowercase_ : Any = pseudo_inv if a_inv is None: try: lowercase_ : List[str] = np.linalg.inv(UpperCAmelCase__ ) except np.linalg.LinAlgError: raise ValueError( """Input matrix A is not invertible. 
Cannot compute Schur complement.""" ) return mat_c - mat_b.T @ a_inv @ mat_b class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Dict = np.array([[2, 1], [6, 3]] ) lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ ) lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] ) lowercase_ : Optional[int] = np.linalg.det(lowercase_ ) lowercase_ : int = np.linalg.det(lowercase_ ) lowercase_ : int = np.linalg.det(lowercase_ ) self.assertAlmostEqual(lowercase_ , det_a * det_s ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(lowercase_ ): schur_complement(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(lowercase_ ): schur_complement(lowercase_ , lowercase_ , lowercase_ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
30
0
'''simple docstring''' from collections.abc import Iterable from typing import Generic, TypeVar _lowercase : Optional[Any] = TypeVar("_T") class __magic_name__ ( Generic[_T]): def __init__( self : str , lowercase_ : Iterable[_T] | None = None ): lowercase_ : list[_T] = list(iterable or [] ) lowercase_ : list[_T] = [] def __len__( self : Optional[int] ): return len(self._stacka ) + len(self._stacka ) def __repr__( self : Optional[int] ): return f'''Queue({tuple(self._stacka[::-1] + self._stacka )})''' def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : _T ): self._stacka.append(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Tuple = self._stacka.pop lowercase_ : Tuple = self._stacka.append if not self._stacka: while self._stacka: stacka_append(stacka_pop() ) if not self._stacka: raise IndexError("""Queue is empty""" ) return self._stacka.pop() if __name__ == "__main__": from doctest import testmod testmod()
700
'''simple docstring''' _lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" def lowerCamelCase ( UpperCAmelCase__ : bytes ) -> bytes: # Make sure the supplied data is a bytes-like object if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ : Union[str, Any] = F'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(UpperCAmelCase__ ) lowercase_ : Dict = """""".join(bin(UpperCAmelCase__ )[2:].zfill(8 ) for byte in data ) lowercase_ : Union[str, Any] = len(UpperCAmelCase__ ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase_ : List[Any] = b"""=""" * ((6 - len(UpperCAmelCase__ ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(UpperCAmelCase__ ) % 6) else: lowercase_ : Union[str, Any] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(UpperCAmelCase__ ) , 6 ) ).encode() + padding ) def lowerCamelCase ( UpperCAmelCase__ : str ) -> bytes: # Make sure encoded_data is either a string or a bytes-like object if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ : List[str] = ( """argument should be a bytes-like object or ASCII string, """ F'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(UpperCAmelCase__ ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): try: lowercase_ : Optional[int] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) lowercase_ : Any = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 
characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(UpperCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase_ : Optional[int] = encoded_data[:-padding] lowercase_ : Any = """""".join( bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase_ : int = """""".join( bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data ) lowercase_ : Optional[int] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(UpperCAmelCase__ ) , 8 ) ] return bytes(UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
30
0
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() _lowercase : List[Any] = logging.get_logger(__name__) _lowercase : Any = ["model.decoder.embed_positions.weights"] def lowerCamelCase ( UpperCAmelCase__ : List[Any] ) -> List[str]: if "emb" in name: lowercase_ : Tuple = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: lowercase_ : Optional[int] = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: lowercase_ : int = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: lowercase_ : Tuple = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: lowercase_ : List[Any] = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: lowercase_ : int = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: lowercase_ : Dict = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: lowercase_ : List[Any] = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: lowercase_ : Union[str, Any] = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: lowercase_ : List[Any] = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: lowercase_ : List[str] = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def lowerCamelCase ( UpperCAmelCase__ : OrderedDict , UpperCAmelCase__ : int ) -> Tuple[Dict, Dict]: lowercase_ : Any = 
list(state_dict.keys() ) lowercase_ : Optional[int] = {} for key in keys: lowercase_ : Optional[int] = state_dict.pop(UpperCAmelCase__ ) lowercase_ : Any = rename_keys(UpperCAmelCase__ ) if "in_proj_weight" in key: # split fused qkv proj lowercase_ : Optional[Any] = val[:hidden_size, :] lowercase_ : List[str] = val[hidden_size : 2 * hidden_size, :] lowercase_ : str = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: lowercase_ : int = val else: lowercase_ : str = val return state_dict, enc_dec_proj_state_dict def lowerCamelCase ( UpperCAmelCase__ : str ) -> MusicgenDecoderConfig: if checkpoint == "small": # default config values lowercase_ : int = 1024 lowercase_ : List[str] = 24 lowercase_ : List[Any] = 16 elif checkpoint == "medium": lowercase_ : Dict = 1536 lowercase_ : Union[str, Any] = 48 lowercase_ : List[Any] = 24 elif checkpoint == "large": lowercase_ : Tuple = 2048 lowercase_ : Union[str, Any] = 48 lowercase_ : Tuple = 32 else: raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' ) lowercase_ : Union[str, Any] = MusicgenDecoderConfig( hidden_size=UpperCAmelCase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCAmelCase__ , num_attention_heads=UpperCAmelCase__ , ) return config @torch.no_grad() def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Dict="cpu" ) -> List[Any]: lowercase_ : List[str] = MusicGen.get_pretrained(UpperCAmelCase__ , device=UpperCAmelCase__ ) lowercase_ : Optional[Any] = decoder_config_from_checkpoint(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = fairseq_model.lm.state_dict() lowercase_ : Optional[int] = rename_state_dict( UpperCAmelCase__ , hidden_size=decoder_config.hidden_size ) lowercase_ : int = TaEncoderModel.from_pretrained("""t5-base""" ) lowercase_ : Tuple = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) lowercase_ : Optional[Any] = MusicgenForCausalLM(UpperCAmelCase__ 
).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection lowercase_ : Union[str, Any] = decoder.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(UpperCAmelCase__ ) if len(UpperCAmelCase__ ) > 0: raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' ) if len(UpperCAmelCase__ ) > 0: raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' ) # init the composite model lowercase_ : Tuple = MusicgenForConditionalGeneration(text_encoder=UpperCAmelCase__ , audio_encoder=UpperCAmelCase__ , decoder=UpperCAmelCase__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(UpperCAmelCase__ ) # check we can do a forward pass lowercase_ : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) lowercase_ : List[str] = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): lowercase_ : int = model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ ).logits if logits.shape != (8, 1, 2048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor lowercase_ : List[str] = AutoTokenizer.from_pretrained("""t5-base""" ) lowercase_ : Optional[int] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) lowercase_ : Optional[int] = MusicgenProcessor(feature_extractor=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ ) # set the appropriate bos/pad token ids lowercase_ : int = 2048 lowercase_ : Union[str, Any] = 2048 # set other default generation config params lowercase_ : List[str] = int(30 * audio_encoder.config.frame_rate ) lowercase_ : List[Any] = True lowercase_ : Union[str, Any] = 3.0 if pytorch_dump_folder is not None: Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ ) logger.info(F'''Saving model 
{checkpoint} to {pytorch_dump_folder}''' ) model.save_pretrained(UpperCAmelCase__ ) processor.save_pretrained(UpperCAmelCase__ ) if repo_id: logger.info(F'''Pushing model {checkpoint} to {repo_id}''' ) model.push_to_hub(UpperCAmelCase__ ) processor.push_to_hub(UpperCAmelCase__ ) if __name__ == "__main__": _lowercase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) _lowercase : Tuple = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
701
'''simple docstring''' import argparse _lowercase : Optional[int] = "docs/source/_static/js/custom.js" def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Dict: with open(UpperCAmelCase__ , encoding="""utf-8""" , newline="""\n""" ) as f: lowercase_ : Optional[int] = f.readlines() lowercase_ : Tuple = 0 # First let's put the right version while not lines[index].startswith("""const stableVersion =""" ): index += 1 lowercase_ : Optional[Any] = F'''const stableVersion = "v{version}"\n''' # Then update the dictionary while not lines[index].startswith("""const versionMapping = {""" ): index += 1 # We go until the end while not lines[index].startswith("""}""" ): index += 1 # We add the new version at the end lines[index - 1] += F''' "v{version}": "v{version}",\n''' with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(UpperCAmelCase__ ) if __name__ == "__main__": _lowercase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--version", help="Release version.") _lowercase : Dict = parser.parse_args() update_custom_js(args.version)
30
0
'''simple docstring'''
# NOTE(review): identifiers in this file (``__magic_name__``, ``lowercase_``,
# ``SCREAMING_SNAKE_CASE_`` ...) look mechanically mangled.  Several
# assignments bind throwaway locals where attribute writes (e.g.
# ``self.parent``) were presumably intended, and names such as
# ``MarkupLMFeatureExtractionTester`` / ``get_html_strings`` /
# ``_UpperCAmelCase`` are referenced but never bound here — confirm against
# the upstream MarkupLM test module before relying on behavior.
import unittest

from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bsa_available():
    from transformers import MarkupLMFeatureExtractor


class __magic_name__ (unittest.TestCase):
    '''simple docstring'''

    def __init__( self : Union[str, Any] , lowercase_ : Any ):
        # Stores the driving test case; this binding is a local, so the value
        # is discarded — presumably meant to be ``self.parent`` (see NOTE above).
        lowercase_ : str = parent

    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        # The MarkupLM feature extractor takes no configuration kwargs.
        return {}


def lowerCamelCase ( ) -> list:
    """Return the two raw HTML fixtures used by the tests below."""
    # First fixture: nested document exercising headers, links and inline tags.
    lowercase_ : Union[str, Any] = """<HTML> <HEAD> <TITLE>sample document</TITLE> </HEAD> <BODY BGCOLOR=\"FFFFFF\"> <HR> <a href=\"http://google.com\">Goog</a> <H1>This is one header</H1> <H2>This is a another Header</H2> <P>Travel from <P> <B>SFO to JFK</B> <BR> <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B> <HR> <div style=\"color:#0000FF\"> <h3>Traveler <b> name </b> is <p> John Doe </p> </div>"""
    # Second fixture: a minimal well-formed document.
    lowercase_ : Optional[Any] = """ <!DOCTYPE html> <html> <body> <h1>My First Heading</h1> <p>My first paragraph.</p> </body> </html> """
    # NOTE(review): ``html_string_a`` is never bound above — mangling artifact.
    return [html_string_a, html_string_a]


@require_bsa
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
    '''simple docstring'''

    # Feature-extractor class under test (None when bs4 is unavailable).
    UpperCamelCase__ = MarkupLMFeatureExtractor if is_bsa_available() else None

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # setUp: build the tester companion (binding is local — mangling artifact).
        lowercase_ : List[Any] = MarkupLMFeatureExtractionTester(self )

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        # Proxy to the tester's (empty) feature-extraction kwargs.
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # Round-trip: extractor must emit the expected text nodes and xpaths
        # for one document, then for the batched pair.
        # Initialize feature_extractor
        lowercase_ : List[Any] = self.feature_extraction_class()
        # Test not batched input
        lowercase_ : Union[str, Any] = get_html_strings()[0]
        lowercase_ : Tuple = feature_extractor(lowercase_ )
        # fmt: off
        lowercase_ : List[Any] = [["""sample document""", """Goog""", """This is one header""", """This is a another Header""", """Travel from""", """SFO to JFK""", """on May 2, 2015 at 2:00 pm. 
For details go to confirm.com""", """Traveler""", """name""", """is""", """John Doe"""]]
        lowercase_ : Union[str, Any] = [["""/html/head/title""", """/html/body/a""", """/html/body/h1""", """/html/body/h2""", """/html/body/p""", """/html/body/p/p/b[1]""", """/html/body/p/p/b[2]/i""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/b""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/p"""]]
        # fmt: on
        self.assertEqual(encoding.nodes , lowercase_ )
        self.assertEqual(encoding.xpaths , lowercase_ )
        # Test batched
        lowercase_ : Tuple = get_html_strings()
        lowercase_ : Optional[Any] = feature_extractor(lowercase_ )
        # fmt: off
        lowercase_ : List[str] = expected_nodes + [["""My First Heading""", """My first paragraph."""]]
        lowercase_ : int = expected_xpaths + [["""/html/body/h1""", """/html/body/p"""]]
        self.assertEqual(len(encoding.nodes ) , 2 )
        self.assertEqual(len(encoding.xpaths ) , 2 )
        self.assertEqual(encoding.nodes , lowercase_ )
        self.assertEqual(encoding.xpaths , lowercase_ )
702
'''simple docstring'''
# NOTE(review): identifiers here (``__magic_name__``, ``lowercase_``,
# ``SCREAMING_SNAKE_CASE_``) look mechanically mangled.  Every class below
# reuses the single method name ``SCREAMING_SNAKE_CASE_`` — in Python, later
# definitions shadow earlier ones, so only the last method per class survives
# at import time — and many assignments bind throwaway locals where attribute
# writes (``self.x = ...``) were presumably intended.  Several referenced
# names (``BitModelTester``, ``prepare_img``, ``_UpperCAmelCase``,
# ``nn.BatchNormad``) are unbound here.  Confirm against the upstream BiT
# test module before relying on behavior.
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image


class __magic_name__ :
    # Helper that builds small BiT configs and random inputs for the tests below.

    def __init__( self : Tuple , lowercase_ : Tuple , lowercase_ : Any=3 , lowercase_ : int=32 , lowercase_ : str=3 , lowercase_ : int=10 , lowercase_ : Any=[8, 16, 32, 64] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Any=True , lowercase_ : int=True , lowercase_ : Any="relu" , lowercase_ : List[Any]=3 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : List[str]=1 , ):
        # Record the test hyper-parameters (bindings are locals — mangling artifact;
        # upstream presumably assigns ``self.batch_size`` etc.).
        lowercase_ : Any = parent
        lowercase_ : str = batch_size
        lowercase_ : Any = image_size
        lowercase_ : Optional[Any] = num_channels
        lowercase_ : Any = embeddings_size
        lowercase_ : Union[str, Any] = hidden_sizes
        lowercase_ : Any = depths
        lowercase_ : Dict = is_training
        lowercase_ : Tuple = use_labels
        lowercase_ : str = hidden_act
        lowercase_ : Optional[Any] = num_labels
        lowercase_ : Tuple = scope
        lowercase_ : Any = len(lowercase_ )
        lowercase_ : Optional[Any] = out_features
        lowercase_ : Tuple = out_indices
        lowercase_ : str = num_groups

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # Random pixel inputs plus optional classification labels.
        lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowercase_ : List[Any] = None
        if self.use_labels:
            lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
        lowercase_ : int = self.get_config()
        return config, pixel_values, labels

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        # Tiny BitConfig assembled from the tester attributes.
        return BitConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] ):
        # Forward pass through the bare BitModel; last hidden state must be
        # the final stage's channels at 32x-downsampled resolution.
        lowercase_ : Optional[int] = BitModel(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : List[Any] = model(lowercase_ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] ):
        # Image-classification head: logits must be (batch, num_labels).
        lowercase_ : Union[str, Any] = self.num_labels
        lowercase_ : Tuple = BitForImageClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : Any = model(lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] ):
        # Backbone: check feature maps / channels both with explicit
        # out_features and with out_features=None (last stage only).
        lowercase_ : Any = BitBackbone(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : Dict = model(lowercase_ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        lowercase_ : List[str] = None
        lowercase_ : Dict = BitBackbone(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : Tuple = model(lowercase_ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # Split prepared config/inputs into the common (config, inputs_dict) form.
        lowercase_ : Optional[int] = self.prepare_config_and_inputs()
        lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
        lowercase_ : Any = {"""pixel_values""": pixel_values}
        return config, inputs_dict


@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
    # Common model-test matrix for BiT, plus the pipeline mapping and the
    # switches disabling tests BiT does not support.

    UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    UpperCamelCase__ = (
        {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # setUp: build the model tester and config tester.
        lowercase_ : int = BitModelTester(self )
        lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # Run the standard config sanity checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        return

    @unittest.skip(reason="""Bit does not output attentions""" )
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        pass

    @unittest.skip(reason="""Bit does not use inputs_embeds""" )
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        pass

    @unittest.skip(reason="""Bit does not support input and output embeddings""" )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        pass

    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        # forward() signature must start with ``pixel_values``.
        lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : Optional[Any] = model_class(lowercase_ )
            lowercase_ : Union[str, Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
            lowercase_ : Optional[int] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        lowercase_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # All norm layers must initialize to weight=1, bias=0.
        # NOTE(review): ``nn.BatchNormad`` looks like a mangled ``nn.BatchNorm2d``.
        lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : List[Any] = model_class(config=lowercase_ )
            for name, module in model.named_modules():
                if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        # hidden_states: one tensor per stage (+ embeddings), first one at
        # quarter resolution, for both layer types and for both the kwarg and
        # the config path of output_hidden_states.
        def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ):
            lowercase_ : Optional[Any] = model_class(lowercase_ )
            model.to(lowercase_ )
            model.eval()
            with torch.no_grad():
                lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
            lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowercase_ : Optional[int] = self.model_tester.num_stages
            self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase_ : Dict = ["""preactivation""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                lowercase_ : Union[str, Any] = layer_type
                lowercase_ : Optional[Any] = True
                check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                lowercase_ : Union[str, Any] = True
                check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )

    @unittest.skip(reason="""Bit does not use feedforward chunking""" )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        pass

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase_ )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # Smoke-test loading the first released checkpoint.
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ )
            self.assertIsNotNone(lowercase_ )


def lowerCamelCase ( ) -> Optional[Any]:
    """Load the standard COCO cats fixture image."""
    lowercase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    # NOTE(review): ``image`` is never bound above — mangling artifact.
    return image


@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
    # Integration test: run the first released checkpoint on a real image
    # and compare the leading logits against recorded values.

    @cached_property
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        lowercase_ : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ )
        lowercase_ : int = self.default_image_processor
        lowercase_ : List[Any] = prepare_img()
        lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ )
        # forward pass
        with torch.no_grad():
            lowercase_ : str = model(**lowercase_ )
        # verify the logits
        lowercase_ : Optional[int] = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , lowercase_ )
        lowercase_ : Union[str, Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )


@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
    # Backbone-specific common tests (BackboneTesterMixin matrix).
    UpperCamelCase__ = (BitBackbone,) if is_torch_available() else ()
    UpperCamelCase__ = BitConfig
    UpperCamelCase__ = False

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        lowercase_ : Union[str, Any] = BitModelTester(self )
30
0
from functools import reduce _lowercase : List[str] = ( "73167176531330624919225119674426574742355349194934" "96983520312774506326239578318016984801869478851843" "85861560789112949495459501737958331952853208805511" "12540698747158523863050715693290963295227443043557" "66896648950445244523161731856403098711121722383113" "62229893423380308135336276614282806444486645238749" "30358907296290491560440772390713810515859307960866" "70172427121883998797908792274921901699720888093776" "65727333001053367881220235421809751254540594752243" "52584907711670556013604839586446706324415722155397" "53697817977846174064955149290862569321978468622482" "83972241375657056057490261407972968652414535100474" "82166370484403199890008895243450658541227588666881" "16427171479924442928230863465674813919123162824586" "17866458359124566529476545682848912883142607690042" "24219022671055626321111109370544217506941658960408" "07198403850962455444362981230987879927244284909188" "84580156166097919133875499200524063689912560717606" "05886116467109405077541002256983155200055935729725" "71636269561882670428252483600823257530420752963450" ) def lowerCamelCase ( UpperCAmelCase__ : str = N ) -> int: return max( # mypy cannot properly interpret reduce int(reduce(lambda UpperCAmelCase__ , UpperCAmelCase__ : str(int(UpperCAmelCase__ ) * int(UpperCAmelCase__ ) ) , n[i : i + 13] ) ) for i in range(len(UpperCAmelCase__ ) - 12 ) ) if __name__ == "__main__": print(f"""{solution() = }""")
703
'''simple docstring'''
# NOTE(review): identifiers are mechanically mangled.  All six test
# functions below share the name ``lowerCamelCase`` — later definitions
# shadow earlier ones, so only the last survives at import time — and the
# function bodies reference names (``no_aggregation``, ``score``, ``k``,
# ``data_dir`` ...) whose assignment sites were renamed to ``lowercase_``.
# Confirm against the upstream seq2seq ROUGE test module.
from collections import defaultdict
from pathlib import Path

import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge


# Candidate summaries (predictions) used across the tests.
_lowercase : Optional[Any] = [
    "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
    " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
    " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
    "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
    " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
    " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
    " body.",
    "Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
    " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
    " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
    " punishment.",
]

# Reference summaries (targets) the candidates are scored against.
_lowercase : List[Any] = [
    "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
    " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
    " had informed his Lufthansa training school of an episode of severe depression, airline says .",
    "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
    " Israel and the United States opposed the move, which could open the door to war crimes investigations against"
    " Israelis .",
    "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
    " death . Organization claims that governments around the world are using the threat of terrorism to advance"
    " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
    " sentences up by 28% .",
]


def lowerCamelCase ( ) -> List[str]:
    # Disaggregated scoring: the per-example frame's mean F-measure must not
    # depend on which other keys were requested.
    lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] )
    assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
    lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] )
    assert (
        pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
    )


def lowerCamelCase ( ) -> Optional[Any]:
    # rougeLsum must score higher with newline separation enabled.
    lowercase_ : Tuple = """rougeLsum"""
    lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
    lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
    assert score > score_no_sep


def lowerCamelCase ( ) -> List[Any]:
    # Newline separation must not affect rouge1/rouge2/rougeL.
    lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""]
    lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
    lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
    assert score_sep == score_no_sep


def lowerCamelCase ( ) -> Optional[Any]:
    # Single-sentence inputs: newline_sep on/off must agree.
    lowercase_ : Union[str, Any] = [
        """Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
    ]
    lowercase_ : List[str] = [
        """Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
        """ the final seconds on board Flight 9525.""",
    ]
    assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ )


def lowerCamelCase ( ) -> Union[str, Any]:
    # Pegasus-style ``<n>`` newline markers: scoring with marker handling
    # must beat the plain rougeLsum score.
    lowercase_ : Optional[Any] = [
        """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
    ]
    lowercase_ : List[Any] = [
        """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    lowercase_ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""]
    lowercase_ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
    assert new_score > prev_score


def lowerCamelCase ( ) -> Tuple:
    # File-based entry point: scoring from paths works both aggregated
    # (dict) and disaggregated (defaultdict).
    lowercase_ : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
    lowercase_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
    assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
    lowercase_ : Union[str, Any] = calculate_rouge_path(
        data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase__ )
    assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
30
0
'''simple docstring'''
# Lazy-import scaffold for the ConditionalDETR model family: optional heavy
# dependencies (vision / torch) are only imported when the corresponding
# attributes are first accessed through ``_LazyModule``.
# NOTE(review): the mechanical renaming to ``_lowercase`` destroyed the dict
# writes this scaffold normally performs (upstream assigns to keys of
# ``_import_structure``), and ``_import_structure`` passed to ``_LazyModule``
# below is never bound under that name here — confirm against upstream.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Mapping of submodule name -> public names it exports (config side).
_lowercase : Any = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

# Vision-only exports (feature extractor / image processor).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : List[Any] = ["ConditionalDetrFeatureExtractor"]
    _lowercase : Optional[Any] = ["ConditionalDetrImageProcessor"]

# Torch-only exports (the modeling classes).
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : List[Any] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # installed in the else-branch below is used instead.
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    # Replace this module in sys.modules with a lazy proxy that imports the
    # submodules on demand.
    _lowercase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
704
'''simple docstring'''
# NOTE(review): identifiers are mechanically mangled; the ``lowercase_``
# bindings in ``__init__`` are locals where attribute writes
# (``self.vocab_size = ...``) were presumably intended, and the base-class
# name ``_UpperCAmelCase`` is unresolved here (upstream subclasses
# ``PretrainedConfig``) — confirm against the upstream Speech2Text
# configuration module.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_lowercase : Optional[Any] = logging.get_logger(__name__)

# Map of released checkpoints to their hosted config files.
_lowercase : Union[str, Any] = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class __magic_name__ ( _UpperCAmelCase):
    """Configuration for Speech2Text encoder-decoder models.

    Stores the transformer hyper-parameters plus the front-end convolutional
    subsampler settings (number of layers, kernel sizes, channels, input
    feature dimensions).
    """

    # AutoConfig model-type key.
    UpperCamelCase__ = '''speech_to_text'''
    # Keys excluded during config comparison/serialization.
    UpperCamelCase__ = ['''past_key_values''']
    # Canonical attribute aliases expected by the common config API.
    UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__( self : str , lowercase_ : Optional[int]=10000 , lowercase_ : int=12 , lowercase_ : Any=2048 , lowercase_ : Any=4 , lowercase_ : Dict=6 , lowercase_ : Any=2048 , lowercase_ : List[str]=4 , lowercase_ : str=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=True , lowercase_ : int="relu" , lowercase_ : str=256 , lowercase_ : int=0.1 , lowercase_ : int=0.0 , lowercase_ : str=0.0 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=1 , lowercase_ : Dict=0 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=6000 , lowercase_ : Tuple=1024 , lowercase_ : str=2 , lowercase_ : Any=(5, 5) , lowercase_ : Union[str, Any]=1024 , lowercase_ : Dict=80 , lowercase_ : List[Any]=1 , **lowercase_ : int , ):
        # Record the model hyper-parameters (bindings are locals — mangling
        # artifact; upstream assigns ``self.vocab_size`` etc.).
        lowercase_ : List[Any] = vocab_size
        lowercase_ : str = d_model
        lowercase_ : List[Any] = encoder_ffn_dim
        lowercase_ : str = encoder_layers
        lowercase_ : Dict = encoder_attention_heads
        lowercase_ : str = decoder_ffn_dim
        lowercase_ : int = decoder_layers
        lowercase_ : Any = decoder_attention_heads
        lowercase_ : Any = dropout
        lowercase_ : Dict = attention_dropout
        lowercase_ : Optional[int] = activation_dropout
        lowercase_ : Any = activation_function
        lowercase_ : Union[str, Any] = init_std
        lowercase_ : str = encoder_layerdrop
        lowercase_ : Optional[int] = decoder_layerdrop
        lowercase_ : Dict = use_cache
        lowercase_ : Union[str, Any] = encoder_layers
        lowercase_ : Tuple = scale_embedding  # scale factor will be sqrt(d_model) if True
        lowercase_ : Dict = max_source_positions
        lowercase_ : Optional[int] = max_target_positions
        lowercase_ : Tuple = num_conv_layers
        lowercase_ : Tuple = list(lowercase_ )
        lowercase_ : Union[str, Any] = conv_channels
        lowercase_ : str = input_feat_per_channel
        lowercase_ : str = input_channels
        # Each convolutional layer needs exactly one kernel size.
        if len(self.conv_kernel_sizes ) != self.num_conv_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
                f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
                f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
        super().__init__(
            pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
30
0