Dataset schema (one record per row below):
  code                     string  (length 86 to 54.5k)
  code_codestyle           int64   (0 to 371)
  style_context            string  (length 87 to 49.2k)
  style_context_codestyle  int64   (0 to 349)
  label                    int64   (0 or 1)
"""simple docstring""" from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class __lowerCamelCase ( lowerCAmelCase_ ): '''simple docstring''' A_ : Optional[Any] = 'EncodecFeatureExtractor' A_ : Tuple = ('T5Tokenizer', 'T5TokenizerFast') def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: super().__init__(__a , __a ) _a = self.feature_extractor _a = False def _UpperCAmelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ) -> Tuple: return self.tokenizer.get_decoder_prompt_ids(task=__a , language=__a , no_timestamps=__a ) def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: if self._in_target_context_manager: return self.current_processor(*__a , **__a ) _a = kwargs.pop('''audio''' , __a ) _a = kwargs.pop('''sampling_rate''' , __a ) _a = kwargs.pop('''text''' , __a ) if len(__a ) > 0: _a = args[0] _a = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if text is not None: _a = self.tokenizer(__a , **__a ) if audio is not None: _a = self.feature_extractor(__a , *__a , sampling_rate=__a , **__a ) if audio is None: return inputs elif text is None: return audio_inputs else: _a = audio_inputs['input_values'] if "padding_mask" in audio_inputs: _a = audio_inputs['padding_mask'] return inputs def _UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: _a = kwargs.pop('''audio''' , __a ) _a = kwargs.pop('''padding_mask''' , __a ) if len(__a ) > 0: _a = args[0] _a = args[1:] if audio_values is not None: return self._decode_audio(__a , padding_mask=__a ) else: return self.tokenizer.batch_decode(*__a , **__a ) def _UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: return self.tokenizer.decode(*__a , **__a ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Optional[int]: _a = to_numpy(__a ) _a = audio_values.shape if padding_mask is None: return list(__a ) _a = to_numpy(__a ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) _a = seq_len - padding_mask.shape[-1] _a = 1 - self.feature_extractor.padding_value _a = np.pad(__a , ((0, 0), (0, difference)) , '''constant''' , constant_values=__a ) _a = audio_values.tolist() for i in range(__a ): _a = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] _a = sliced_audio.reshape(__a , -1 ) return audio_values
320
"""Tests for the Stable Diffusion 2 inpainting pipeline."""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset([])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
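For reference, a minimal standalone sketch of the pipeline exercised above, reusing the checkpoint, prompt, and image URLs from the integration tests; the output filename is illustrative and a CUDA GPU is assumed.

# Sketch only; mirrors the integration tests above.
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
image = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
).images[0]
image.save("cat_on_bench.png")  # illustrative output path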
27
0
"""Create an UnCLIP image-variation pipeline from a pretrained UnCLIP text-to-image checkpoint."""
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
107
"""Fetch information about the authenticated GitHub user."""
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Query the GitHub API for details of the user owning `auth_token`."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
107
1
"""Benchmark arguments for PyTorch, with handling of deprecated `no_*` flags."""
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # This __init__ exists for legacy code: it maps deprecated `no_*` arguments
        # onto their positive counterparts before the dataclass init runs.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
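A hedged usage sketch for the argument class above. `PyTorchBenchmark` is the companion runner from transformers' benchmark utilities and is an assumption here (this sample only defines the argument class); the model name is illustrative.

# Assumed companion API; only PyTorchBenchmarkArguments is defined in this sample.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32])
results = PyTorchBenchmark(args).run()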
162
"""Compute and pickle per-example max token lengths for a seq2seq dataset."""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example, to speed up dynamic batching later."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
63
0
"""Nightly tests for the legacy ONNX Stable Diffusion inpainting pipeline."""
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)


if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class StableDiffusionOnnxInpaintLegacyPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
354
import random


def _partition(data: list, pivot) -> tuple:
    """Three-way partition of `data` around `pivot`."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the element that would sit at position `index` if `items` were sorted."""
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
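A quick illustrative check of the selection routine above; the sample list is made up for the example.

# Illustrative: find the median of an unsorted list in expected O(n) time.
values = [7, 2, 9, 4, 1, 8, 3]
median = quick_select(values, len(values) // 2)
print(median)  # 4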
232
0
"""Testing suite for the PyTorch YOLOS model."""
import inspect
import unittest

from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import YolosForObjectDetection, YolosModel
    from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection}
        if is_torch_available()
        else {}
    )

    is_training = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the COCO test fixture used for the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
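A standalone inference sketch mirroring the integration test above; the checkpoint and fixture path come from the sample, while the printing loop at the end is illustrative.

# Sketch of standalone YOLOS inference, mirroring the integration test above.
import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
results = processor.post_process_object_detection(
    outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())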
312
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation used to normalize embeddings, and applies/undoes that normalization."""

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
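A small sketch of the normalizer above; tensor sizes are illustrative. With the default zero mean and unit std, scale followed by unscale is an exact round trip.

# Illustrative: round-trip an embedding through scale/unscale.
import torch

normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
embeds = torch.randn(4, 768)
restored = normalizer.unscale(normalizer.scale(embeds))
assert torch.allclose(embeds, restored, atol=1e-6)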
312
1
"""Longest path in a directed acyclic graph, via Kahn's topological ordering."""


def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
251
"""Testing suite for the PyTorch Audio Spectrogram Transformer (AST) model."""
import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ASTForAudioClassification, ASTModel
    from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )


if is_torchaudio_available():
    import torchaudio

    from transformers import ASTFeatureExtractor


class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict


@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )

    is_training = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_audio():
    """Download and load the sample audio clip used for the integration test."""
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
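A standalone sketch of AST audio classification mirroring the integration test above; checkpoint, repo id, and filename are taken from the sample, and the final print is illustrative.

# Sketch of standalone AST inference, mirroring the integration test above.
import torch
import torchaudio
from huggingface_hub import hf_hub_download
from transformers import ASTFeatureExtractor, ASTForAudioClassification

path = hf_hub_download(
    repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
)
audio, sampling_rate = torchaudio.load(path)

feature_extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")

inputs = feature_extractor(audio.squeeze().numpy(), sampling_rate=sampling_rate, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])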
251
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
325
import math


def is_prime(number: int) -> bool:
    """Return True if `number` is prime."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the first prime strictly after `factor * value` when that product is prime, otherwise the nearest prime."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
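A short illustrative check of the helpers above; the sample values are made up for the example.

# Illustrative: next_prime skips the starting value itself when it is already prime.
print(is_prime(29))    # True
print(next_prime(14))  # 17
print(next_prime(13))  # 17 (13 is prime, so the search restarts at 14)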
325
1
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
167
"""Convert model checkpoints between PyTorch and Flax."""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict

import transformers

from .utils import logging


logger = logging.get_logger(__name__)


def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict


def rename_key_and_reshape_tensor(
    pt_tuple_key,
    pt_tensor,
    random_flax_state_dict,
    model_prefix,
):
    """Rename PyTorch weight names to the matching Flax names and reshape the tensor if necessary."""

    def is_key_or_prefix_key_in_dict(key) -> bool:
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)


def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)


def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model."""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
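A hedged usage sketch for the conversion paths above. In practice these utilities are driven through `from_pretrained`; the model and checkpoint names are illustrative, and the sketch assumes the checkpoint repo ships both PyTorch and Flax weights.

# Illustrative: cross-framework loading exercises the converters above.
from transformers import BertForMaskedLM, FlaxBertForMaskedLM

flax_model = FlaxBertForMaskedLM.from_pretrained("bert-base-uncased", from_pt=True)   # PyTorch -> Flax path
pt_model = BertForMaskedLM.from_pretrained("bert-base-uncased", from_flax=True)       # Flax -> PyTorch path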
167
1
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _A = 1_6 _A = 3_2 def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase = 16 ) -> List[str]: lowerCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowerCAmelCase__ : List[str] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(__UpperCAmelCase ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase__ : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCAmelCase__ : str = datasets.map( __UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase__ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__UpperCAmelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase__ : Union[str, Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase__ : Optional[Any] = 16 elif accelerator.mixed_precision != "no": lowerCAmelCase__ : Any = 8 else: lowerCAmelCase__ : Any = None return tokenizer.pad( __UpperCAmelCase , padding="""longest""" , max_length=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_tensors="""pt""" , ) # Instantiate dataloaders. 
lowerCAmelCase__ : Any = DataLoader( tokenized_datasets["""train"""] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = DataLoader( tokenized_datasets["""validation"""] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _A = mocked_dataloaders # noqa: F811 def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict: # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __UpperCAmelCase ) == "1": lowerCAmelCase__ : List[Any] = 2 # New Code # lowerCAmelCase__ : Tuple = int(args.gradient_accumulation_steps ) # Initialize accelerator lowerCAmelCase__ : Union[str, Any] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__UpperCAmelCase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( """Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase__ : Tuple = config["""lr"""] lowerCAmelCase__ : int = int(config["""num_epochs"""] ) lowerCAmelCase__ : List[Any] = int(config["""seed"""] ) lowerCAmelCase__ : Tuple = int(config["""batch_size"""] ) lowerCAmelCase__ : Optional[int] = evaluate.load("""glue""" , """mrpc""" ) set_seed(__UpperCAmelCase ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = get_dataloaders(__UpperCAmelCase , __UpperCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase__ : Tuple = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__UpperCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase__ : Optional[Any] = model.to(accelerator.device ) # Instantiate optimizer lowerCAmelCase__ : Optional[int] = AdamW(params=model.parameters() , lr=__UpperCAmelCase ) # Instantiate scheduler lowerCAmelCase__ : Optional[Any] = get_linear_schedule_with_warmup( optimizer=__UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(__UpperCAmelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = accelerator.prepare( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Now we train the model for epoch in range(__UpperCAmelCase ): model.train() for step, batch in enumerate(__UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(__UpperCAmelCase ): lowerCAmelCase__ : str = model(**__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = output.loss accelerator.backward(__UpperCAmelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase__ : int = model(**__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = outputs.logits.argmax(dim=-1 ) lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__UpperCAmelCase , references=__UpperCAmelCase , ) lowerCAmelCase__ : Any = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , __UpperCAmelCase ) def lowercase_ ( ) -> Any: lowerCAmelCase__ : Union[str, Any] = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__UpperCAmelCase , default=__UpperCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) # New Code # parser.add_argument( """--gradient_accumulation_steps""" , type=__UpperCAmelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) lowerCAmelCase__ : List[str] = parser.parse_args() lowerCAmelCase__ : Any = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__UpperCAmelCase , __UpperCAmelCase ) if __name__ == "__main__": main()
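# Conceptually, the `accelerator.accumulate(model)` context used above replaces
# the manual accumulation pattern sketched below (a simplification that reuses
# the names defined in the training function above and ignores mixed-precision
# scaling and distributed gradient synchronization).
# To run the script itself, launch it through the Accelerate CLI, e.g.:
#   accelerate launch this_script.py --gradient_accumulation_steps 4
for step, batch in enumerate(train_dataloader):
    loss = model(**batch).loss / gradient_accumulation_steps
    loss.backward()
    if (step + 1) % gradient_accumulation_steps == 0:
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()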
242
"""simple docstring""" from typing import Any import numpy as np def lowercase_ ( __UpperCAmelCase ) -> bool: return np.array_equal(__UpperCAmelCase , matrix.conjugate().T ) def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Any: lowerCAmelCase__ : Optional[int] = v.conjugate().T lowerCAmelCase__ : Optional[int] = v_star.dot(__UpperCAmelCase ) assert isinstance(__UpperCAmelCase , np.ndarray ) return (v_star_dot.dot(__UpperCAmelCase )) / (v_star.dot(__UpperCAmelCase )) def lowercase_ ( ) -> None: lowerCAmelCase__ : Union[str, Any] = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] ) lowerCAmelCase__ : List[str] = np.array([[1], [2], [3]] ) assert is_hermitian(__UpperCAmelCase ), f"""{a} is not hermitian.""" print(rayleigh_quotient(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ : Union[str, Any] = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(__UpperCAmelCase ), f"""{a} is not hermitian.""" assert rayleigh_quotient(__UpperCAmelCase , __UpperCAmelCase ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
242
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) def lowercase_ ( _lowerCamelCase : str): lowercase__ : Optional[Any] = DPTConfig() if "large" in checkpoint_url: lowercase__ : str = 1024 lowercase__ : List[str] = 4096 lowercase__ : List[Any] = 24 lowercase__ : Dict = 16 lowercase__ : Union[str, Any] = [5, 11, 17, 23] lowercase__ : Any = [256, 512, 1024, 1024] lowercase__ : Optional[int] = (1, 384, 384) if "ade" in checkpoint_url: lowercase__ : Union[str, Any] = True lowercase__ : Tuple = 150 lowercase__ : Optional[int] = "huggingface/label-files" lowercase__ : str = "ade20k-id2label.json" lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset")) , "r")) lowercase__ : Union[str, Any] = {int(_lowerCamelCase): v for k, v in idalabel.items()} lowercase__ : Optional[Any] = idalabel lowercase__ : Union[str, Any] = {v: k for k, v in idalabel.items()} lowercase__ : Tuple = [1, 150, 480, 480] return config, expected_shape def lowercase_ ( _lowerCamelCase : List[Any]): lowercase__ : int = ["pretrained.model.head.weight", "pretrained.model.head.bias"] for k in ignore_keys: state_dict.pop(_lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : Tuple): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowercase__ : Dict = name.replace("pretrained.model" , "dpt.encoder") if "pretrained.model" in name: lowercase__ : List[str] = name.replace("pretrained.model" , "dpt.embeddings") if "patch_embed" in name: lowercase__ : Any = name.replace("patch_embed" , "patch_embeddings") if "pos_embed" in name: lowercase__ : Union[str, Any] = name.replace("pos_embed" , "position_embeddings") if "attn.proj" in name: lowercase__ : Optional[int] = name.replace("attn.proj" , "attention.output.dense") if "proj" in name and "project" not in name: lowercase__ : int = name.replace("proj" , "projection") if "blocks" in name: lowercase__ : List[str] = name.replace("blocks" , "layer") if "mlp.fc1" in name: lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense") if "mlp.fc2" in name: lowercase__ : Optional[int] = name.replace("mlp.fc2" , "output.dense") if "norm1" in name: lowercase__ : List[str] = name.replace("norm1" , "layernorm_before") if "norm2" in name: lowercase__ : Dict = name.replace("norm2" , "layernorm_after") if "scratch.output_conv" in name: lowercase__ : Union[str, Any] = name.replace("scratch.output_conv" , "head") if "scratch" in name: lowercase__ : str = name.replace("scratch" , "neck") if "layer1_rn" in name: lowercase__ : int = name.replace("layer1_rn" , "convs.0") if "layer2_rn" in name: lowercase__ : int = name.replace("layer2_rn" , "convs.1") if "layer3_rn" in name: lowercase__ : Tuple = name.replace("layer3_rn" , "convs.2") if "layer4_rn" in name: lowercase__ : Union[str, Any] = name.replace("layer4_rn" , "convs.3") if "refinenet" in name: lowercase__ : Dict = int(name[len("neck.refinenet") : len("neck.refinenet") + 1]) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowercase__ : str = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4)}''') if "out_conv" in 
name: lowercase__ : str = name.replace("out_conv" , "projection") if "resConfUnit1" in name: lowercase__ : int = name.replace("resConfUnit1" , "residual_layer1") if "resConfUnit2" in name: lowercase__ : Optional[Any] = name.replace("resConfUnit2" , "residual_layer2") if "conv1" in name: lowercase__ : List[Any] = name.replace("conv1" , "convolution1") if "conv2" in name: lowercase__ : Tuple = name.replace("conv2" , "convolution2") # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowercase__ : int = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0") if "pretrained.act_postprocess2.0.project.0" in name: lowercase__ : Any = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0") if "pretrained.act_postprocess3.0.project.0" in name: lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0") if "pretrained.act_postprocess4.0.project.0" in name: lowercase__ : List[Any] = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0") # resize blocks if "pretrained.act_postprocess1.3" in name: lowercase__ : Union[str, Any] = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection") if "pretrained.act_postprocess1.4" in name: lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize") if "pretrained.act_postprocess2.3" in name: lowercase__ : int = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection") if "pretrained.act_postprocess2.4" in name: lowercase__ : str = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize") if "pretrained.act_postprocess3.3" in name: lowercase__ : Dict = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection") if "pretrained.act_postprocess4.3" in name: lowercase__ : Any = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection") if "pretrained.act_postprocess4.4" in name: lowercase__ : int = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize") if "pretrained" in name: lowercase__ : Any = name.replace("pretrained" , "dpt") if "bn" in name: lowercase__ : str = name.replace("bn" , "batch_norm") if "head" in name: lowercase__ : Optional[Any] = name.replace("head" , "head.head") if "encoder.norm" in name: lowercase__ : Tuple = name.replace("encoder.norm" , "layernorm") if "auxlayer" in name: lowercase__ : int = name.replace("auxlayer" , "auxiliary_head.head") return name def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str): for i in range(config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''') lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''') # next, add query, keys and values (in that order) to the state dict lowercase__ : Optional[int] = in_proj_weight[: config.hidden_size, :] lowercase__ : Optional[int] = in_proj_bias[: config.hidden_size] lowercase__ : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase__ : Optional[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase__ : List[Any] = in_proj_weight[ 
-config.hidden_size :, : ] lowercase__ : int = in_proj_bias[-config.hidden_size :] def lowercase_ ( ): lowercase__ : Any = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase__ : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw) return im @torch.no_grad() def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict): lowercase__ , lowercase__ : Optional[int] = get_dpt_config(_lowerCamelCase) # load original state_dict from URL lowercase__ : Tuple = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu") # remove certain keys remove_ignore_keys_(_lowerCamelCase) # rename keys for key in state_dict.copy().keys(): lowercase__ : List[str] = state_dict.pop(_lowerCamelCase) lowercase__ : List[Any] = val # read in qkv matrices read_in_q_k_v(_lowerCamelCase , _lowerCamelCase) # load HuggingFace model lowercase__ : Any = DPTForSemanticSegmentation(_lowerCamelCase) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase) model.load_state_dict(_lowerCamelCase) model.eval() # Check outputs on an image lowercase__ : Optional[Any] = 480 if "ade" in checkpoint_url else 384 lowercase__ : Union[str, Any] = DPTImageProcessor(size=_lowerCamelCase) lowercase__ : List[str] = prepare_img() lowercase__ : Dict = image_processor(_lowerCamelCase , return_tensors="pt") # forward pass lowercase__ : Tuple = model(**_lowerCamelCase).logits if "ade" in checkpoint_url else model(**_lowerCamelCase).predicted_depth # Assert logits lowercase__ : Union[str, Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]]) if "ade" in checkpoint_url: lowercase__ : List[str] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]]) assert outputs.shape == torch.Size(_lowerCamelCase) assert ( torch.allclose(outputs[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , _lowerCamelCase) ) Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase) print(f'''Saving model to {pytorch_dump_folder_path}''') model.save_pretrained(_lowerCamelCase) print(f'''Saving image processor to {pytorch_dump_folder_path}''') image_processor.save_pretrained(_lowerCamelCase) if push_to_hub: print("Pushing model to hub...") model.push_to_hub( repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''', type=str, help='''URL of the original DPT checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', ) parser.add_argument( '''--model_name''', default='''dpt-large''', type=str, help='''Name of the model, in case you\'re pushing to the hub.''', ) UpperCamelCase = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
333
# Logistic Regression from scratch

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    """Logistic (sigmoid) activation, mapping any real z into (0, 1)."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Binary cross-entropy between predictions h and labels y."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    """Log-likelihood of the labels under the current weights (not used by the trainer below)."""
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    """Fit the weight vector by batch gradient descent with learning rate `alpha`."""
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
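# As a cross-check, the hand-rolled trainer above can be compared against
# scikit-learn's reference implementation. A sketch (reuses x and y from the
# __main__ block above; `penalty=None` requires scikit-learn >= 1.2, and the
# intercept is disabled to match the model above). Since this iris subset is
# linearly separable, expect a comparable decision boundary rather than
# identical weights:
from sklearn.linear_model import LogisticRegression

clf = LogisticRegression(penalty=None, fit_intercept=False, max_iter=10_000)
clf.fit(x, y)
print("sklearn theta:", clf.coef_.ravel())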
333
1
'''simple docstring''' import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class lowercase_ ( a__ , a__ ): @register_to_config def __init__( self , a = 1_28 , a = 2_56 , a = 2000.0 , a = 7_68 , a = 12 , a = 12 , a = 64 , a = 20_48 , a = 0.1 , ): super().__init__() UpperCamelCase__ = nn.Sequential( nn.Linear(a , d_model * 4 , bias=a ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=a ) , nn.SiLU() , ) UpperCamelCase__ = nn.Embedding(a , a ) UpperCamelCase__ = False UpperCamelCase__ = nn.Linear(a , a , bias=a ) UpperCamelCase__ = nn.Dropout(p=a ) UpperCamelCase__ = nn.ModuleList() for lyr_num in range(a ): # FiLM conditional T5 decoder UpperCamelCase__ = DecoderLayer(d_model=a , d_kv=a , num_heads=a , d_ff=a , dropout_rate=a ) self.decoders.append(a ) UpperCamelCase__ = TaLayerNorm(a ) UpperCamelCase__ = nn.Dropout(p=a ) UpperCamelCase__ = nn.Linear(a , a , bias=a ) def __a ( self , a , a ): UpperCamelCase__ = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def __a ( self , a , a , a ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. UpperCamelCase__ = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) UpperCamelCase__ = self.conditioning_emb(a ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) UpperCamelCase__ = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. UpperCamelCase__ = torch.broadcast_to( torch.arange(a , device=decoder_input_tokens.device ) , (batch, seq_length) , ) UpperCamelCase__ = self.position_encoding(a ) UpperCamelCase__ = self.continuous_inputs_projection(a ) inputs += position_encodings UpperCamelCase__ = self.dropout(a ) # decoder: No padding present. UpperCamelCase__ = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
UpperCamelCase__ = [(x, self.encoder_decoder_mask(a , a )) for x, y in encodings_and_masks] # cross attend style: concat encodings UpperCamelCase__ = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) UpperCamelCase__ = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: UpperCamelCase__ = lyr( a , conditioning_emb=a , encoder_hidden_states=a , encoder_attention_mask=a , )[0] UpperCamelCase__ = self.decoder_norm(a ) UpperCamelCase__ = self.post_dropout(a ) UpperCamelCase__ = self.spec_out(a ) return spec_out class lowercase_ ( nn.Module ): def __init__( self , a , a , a , a , a , a=1e-6 ): super().__init__() UpperCamelCase__ = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=a , d_kv=a , num_heads=a , dropout_rate=a ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=a , d_kv=a , num_heads=a , dropout_rate=a , layer_norm_epsilon=a , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=a , d_ff=a , dropout_rate=a , layer_norm_epsilon=a ) ) def __a ( self , a , a=None , a=None , a=None , a=None , a=None , ): UpperCamelCase__ = self.layer[0]( a , conditioning_emb=a , attention_mask=a , ) if encoder_hidden_states is not None: UpperCamelCase__ = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to( encoder_hidden_states.dtype ) UpperCamelCase__ = self.layer[1]( a , key_value_states=a , attention_mask=a , ) # Apply Film Conditional Feed Forward layer UpperCamelCase__ = self.layer[-1](a , a ) return (hidden_states,) class lowercase_ ( nn.Module ): def __init__( self , a , a , a , a ): super().__init__() UpperCamelCase__ = TaLayerNorm(a ) UpperCamelCase__ = TaFiLMLayer(in_features=d_model * 4 , out_features=a ) UpperCamelCase__ = Attention(query_dim=a , heads=a , dim_head=a , out_bias=a , scale_qk=a ) UpperCamelCase__ = nn.Dropout(a ) def __a ( self , a , a=None , a=None , ): # pre_self_attention_layer_norm UpperCamelCase__ = self.layer_norm(a ) if conditioning_emb is not None: UpperCamelCase__ = self.FiLMLayer(a , a ) # Self-attention block UpperCamelCase__ = self.attention(a ) UpperCamelCase__ = hidden_states + self.dropout(a ) return hidden_states class lowercase_ ( nn.Module ): def __init__( self , a , a , a , a , a ): super().__init__() UpperCamelCase__ = Attention(query_dim=a , heads=a , dim_head=a , out_bias=a , scale_qk=a ) UpperCamelCase__ = TaLayerNorm(a , eps=a ) UpperCamelCase__ = nn.Dropout(a ) def __a ( self , a , a=None , a=None , ): UpperCamelCase__ = self.layer_norm(a ) UpperCamelCase__ = self.attention( a , encoder_hidden_states=a , attention_mask=attention_mask.squeeze(1 ) , ) UpperCamelCase__ = hidden_states + self.dropout(a ) return layer_output class lowercase_ ( nn.Module ): def __init__( self , a , a , a , a ): super().__init__() UpperCamelCase__ = TaDenseGatedActDense(d_model=a , d_ff=a , dropout_rate=a ) UpperCamelCase__ = TaFiLMLayer(in_features=d_model * 4 , out_features=a ) UpperCamelCase__ = TaLayerNorm(a , eps=a ) UpperCamelCase__ = nn.Dropout(a ) def __a ( self , a , a=None ): UpperCamelCase__ = self.layer_norm(a ) if conditioning_emb is not None: UpperCamelCase__ = self.film(a , a ) UpperCamelCase__ = self.DenseReluDense(a ) UpperCamelCase__ = hidden_states + self.dropout(a ) return hidden_states class lowercase_ ( nn.Module ): def __init__( self , a , a , a ): super().__init__() UpperCamelCase__ = nn.Linear(a , a , bias=a ) UpperCamelCase__ = nn.Linear(a , a , bias=a ) UpperCamelCase__ = nn.Linear(a , a , 
bias=a ) UpperCamelCase__ = nn.Dropout(a ) UpperCamelCase__ = NewGELUActivation() def __a ( self , a ): UpperCamelCase__ = self.act(self.wi_a(a ) ) UpperCamelCase__ = self.wi_a(a ) UpperCamelCase__ = hidden_gelu * hidden_linear UpperCamelCase__ = self.dropout(a ) UpperCamelCase__ = self.wo(a ) return hidden_states class lowercase_ ( nn.Module ): def __init__( self , a , a=1e-6 ): super().__init__() UpperCamelCase__ = nn.Parameter(torch.ones(a ) ) UpperCamelCase__ = eps def __a ( self , a ): # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 UpperCamelCase__ = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=a ) UpperCamelCase__ = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: UpperCamelCase__ = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class lowercase_ ( nn.Module ): def __a ( self , a ): return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(a , 3.0 )) )) class lowercase_ ( nn.Module ): def __init__( self , a , a ): super().__init__() UpperCamelCase__ = nn.Linear(a , out_features * 2 , bias=a ) def __a ( self , a , a ): UpperCamelCase__ = self.scale_bias(a ) UpperCamelCase__ , UpperCamelCase__ = torch.chunk(a , 2 , -1 ) UpperCamelCase__ = x * (1 + scale) + shift return x
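# The FiLM conditioning used throughout this decoder (the TaFiLMLayer at the
# end) boils down to a learned per-feature affine transform,
# x -> x * (1 + scale) + shift, with scale/shift predicted from the
# conditioning embedding. A standalone sketch of that mechanism with
# illustrative dimensions:
import torch
from torch import nn

d_model, d_cond = 8, 32
film = nn.Linear(d_cond, d_model * 2, bias=False)  # mirrors scale_bias above
x = torch.randn(2, 5, d_model)    # (batch, seq, features)
cond = torch.randn(2, 1, d_cond)  # conditioning embedding
scale, shift = film(cond).chunk(2, dim=-1)
x = x * (1 + scale) + shift       # broadcasts over the sequence axis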
80
'''simple docstring''' import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def _UpperCamelCase ( __A , __A , __A , __A , __A ) -> List[str]: '''simple docstring''' with open(__A ) as metadata_file: UpperCamelCase__ = json.load(__A ) UpperCamelCase__ = LukeConfig(use_entity_aware_attention=__A , **metadata["model_config"] ) # Load in the weights from the checkpoint_path UpperCamelCase__ = torch.load(__A , map_location="cpu" )["module"] # Load the entity vocab file UpperCamelCase__ = load_original_entity_vocab(__A ) # add an entry for [MASK2] UpperCamelCase__ = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 UpperCamelCase__ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks UpperCamelCase__ = AddedToken("<ent>" , lstrip=__A , rstrip=__A ) UpperCamelCase__ = AddedToken("<ent2>" , lstrip=__A , rstrip=__A ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(__A ) with open(os.path.join(__A , "tokenizer_config.json" ) , "r" ) as f: UpperCamelCase__ = json.load(__A ) UpperCamelCase__ = "MLukeTokenizer" with open(os.path.join(__A , "tokenizer_config.json" ) , "w" ) as f: json.dump(__A , __A ) with open(os.path.join(__A , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f: json.dump(__A , __A ) UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A ) # Initialize the embeddings of the special tokens UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["@"] )[0] UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["#"] )[0] UpperCamelCase__ = state_dict["embeddings.word_embeddings.weight"] UpperCamelCase__ = word_emb[ent_init_index].unsqueeze(0 ) UpperCamelCase__ = word_emb[enta_init_index].unsqueeze(0 ) UpperCamelCase__ = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: UpperCamelCase__ = state_dict[bias_name] UpperCamelCase__ = decoder_bias[ent_init_index].unsqueeze(0 ) UpperCamelCase__ = decoder_bias[enta_init_index].unsqueeze(0 ) UpperCamelCase__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: UpperCamelCase__ = F'''encoder.layer.{layer_index}.attention.self.''' UpperCamelCase__ = state_dict[prefix + matrix_name] UpperCamelCase__ = state_dict[prefix + matrix_name] UpperCamelCase__ = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks UpperCamelCase__ = state_dict["entity_embeddings.entity_embeddings.weight"] UpperCamelCase__ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 ) UpperCamelCase__ = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' UpperCamelCase__ = state_dict["entity_predictions.bias"] UpperCamelCase__ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 ) UpperCamelCase__ = torch.cat([entity_prediction_bias, entity_mask_bias] ) UpperCamelCase__ = LukeForMaskedLM(config=__A ).eval() 
state_dict.pop("entity_predictions.decoder.weight" ) state_dict.pop("lm_head.decoder.weight" ) state_dict.pop("lm_head.decoder.bias" ) UpperCamelCase__ = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )): UpperCamelCase__ = state_dict[key] else: UpperCamelCase__ = state_dict[key] UpperCamelCase__ , UpperCamelCase__ = model.load_state_dict(__A , strict=__A ) if set(__A ) != {"luke.embeddings.position_ids"}: raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' ) if set(__A ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A , task="entity_classification" ) UpperCamelCase__ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)." UpperCamelCase__ = (0, 9) UpperCamelCase__ = tokenizer(__A , entity_spans=[span] , return_tensors="pt" ) UpperCamelCase__ = model(**__A ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base UpperCamelCase__ = torch.Size((1, 33, 768) ) UpperCamelCase__ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base UpperCamelCase__ = torch.Size((1, 1, 768) ) UpperCamelCase__ = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' F''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __A , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A ) UpperCamelCase__ = "Tokyo is the capital of <mask>." 
UpperCamelCase__ = (24, 30) UpperCamelCase__ = tokenizer(__A , entity_spans=[span] , return_tensors="pt" ) UpperCamelCase__ = model(**__A ) UpperCamelCase__ = encoding["input_ids"][0].tolist() UpperCamelCase__ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) ) UpperCamelCase__ = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(__A ) UpperCamelCase__ = outputs.entity_logits[0][0].argmax().item() UpperCamelCase__ = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(__A ) ) model.save_pretrained(__A ) def _UpperCamelCase ( __A ) -> Optional[int]: '''simple docstring''' UpperCamelCase__ = ["[MASK]", "[PAD]", "[UNK]"] UpperCamelCase__ = [json.loads(__A ) for line in open(__A )] UpperCamelCase__ = {} for entry in data: UpperCamelCase__ = entry["id"] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: UpperCamelCase__ = entity_id break UpperCamelCase__ = F'''{language}:{entity_name}''' UpperCamelCase__ = entity_id return new_mapping if __name__ == "__main__": a__ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' ) a__ : Any = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
80
1
"""simple docstring""" import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __A = 'src/transformers' __A = 'docs/source/en/tasks' def lowercase_ ( _lowerCamelCase: int , _lowerCamelCase: Tuple , _lowerCamelCase: int ) -> List[Any]: '''simple docstring''' with open(_UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f: __lowerCamelCase : Any = f.readlines() # Find the start prompt. __lowerCamelCase : str = 0 while not lines[start_index].startswith(_UpperCamelCase ): start_index += 1 start_index += 1 __lowerCamelCase : Tuple = start_index while not lines[end_index].startswith(_UpperCamelCase ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. __A = direct_transformers_import(TRANSFORMERS_PATH) __A = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
__A = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def lowercase_ ( _lowerCamelCase: Optional[int] ) -> str: '''simple docstring''' __lowerCamelCase : Union[str, Any] = TASK_GUIDE_TO_MODELS[task_guide] __lowerCamelCase : Optional[int] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_UpperCamelCase , set() ) __lowerCamelCase : Any = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n" def lowercase_ ( _lowerCamelCase: Tuple , _lowerCamelCase: Tuple=False ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = _find_text_in_file( filename=os.path.join(_UpperCamelCase , _UpperCamelCase ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , ) __lowerCamelCase : str = get_model_list_for_task(_UpperCamelCase ) if current_list != new_list: if overwrite: with open(os.path.join(_UpperCamelCase , _UpperCamelCase ) , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`""" " to fix this." ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') __A = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
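# A tiny self-contained check of _find_text_in_file's contract: it returns the
# block strictly between the two marker lines, plus the line indices and the
# full list of lines. (A hypothetical temp file, purely for illustration.)
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".md", delete=False) as tmp:
    tmp.write("intro\n<!--start-->\nold list\n<!--end-->\n")
text, start, end, lines = _find_text_in_file(
    filename=tmp.name, start_prompt="<!--start-->", end_prompt="<!--end-->"
)
assert (text, start, end) == ("old list\n", 2, 3)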
368
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _snake_case ( a__ ): def __init__( self : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str ): super().__init__() # make sure scheduler can always be converted to DDIM __lowerCamelCase : Dict = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase ) @torch.no_grad() def __call__( self : str , UpperCAmelCase : int = 1 , UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase : float = 0.0 , UpperCAmelCase : int = 50 , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , ): # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size , UpperCAmelCase ): __lowerCamelCase : Any = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: __lowerCamelCase : Dict = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(UpperCAmelCase )}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) __lowerCamelCase : str = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output __lowerCamelCase : Any = self.unet(UpperCAmelCase , UpperCAmelCase ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 __lowerCamelCase : Union[str, Any] = self.scheduler.step( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , eta=UpperCAmelCase , use_clipped_model_output=UpperCAmelCase , generator=UpperCAmelCase ).prev_sample __lowerCamelCase : Any = (image / 2 + 0.5).clamp(0 , 1 ) __lowerCamelCase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __lowerCamelCase : str = self.numpy_to_pil(UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase )
64
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { """google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""", # See all CANINE models at https://huggingface.co/models?filter=canine } class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = """canine""" def __init__( self , __lowerCamelCase=768 , __lowerCamelCase=12 , __lowerCamelCase=12 , __lowerCamelCase=3072 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=1_6384 , __lowerCamelCase=16 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-1_2 , __lowerCamelCase=0 , __lowerCamelCase=0XE_0_0_0 , __lowerCamelCase=0XE_0_0_1 , __lowerCamelCase=4 , __lowerCamelCase=4 , __lowerCamelCase=8 , __lowerCamelCase=1_6384 , __lowerCamelCase=128 , **__lowerCamelCase , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) __A : Any = max_position_embeddings __A : Dict = hidden_size __A : Tuple = num_hidden_layers __A : int = num_attention_heads __A : int = intermediate_size __A : Optional[int] = hidden_act __A : Optional[Any] = hidden_dropout_prob __A : Any = attention_probs_dropout_prob __A : Any = initializer_range __A : Tuple = type_vocab_size __A : Tuple = layer_norm_eps # Character config: __A : Optional[Any] = downsampling_rate __A : List[str] = upsampling_kernel_size __A : List[Any] = num_hash_functions __A : Optional[int] = num_hash_buckets __A : Dict = local_transformer_stride
179
"""simple docstring""" import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration a_ = 50000 a_ = 5000 a_ , a_ = os.path.split(__file__) a_ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json""")) @get_duration def __lowercase ( snake_case_ : datasets.Dataset ,snake_case_ : Tuple ) ->Tuple: '''simple docstring''' for i in range(snake_case_ ): __A : int = dataset[i] @get_duration def __lowercase ( snake_case_ : datasets.Dataset ,snake_case_ : Optional[Any] ,snake_case_ : int ) ->Tuple: '''simple docstring''' for i in range(0 ,len(snake_case_ ) ,snake_case_ ): __A : List[str] = dataset[i : i + batch_size] @get_duration def __lowercase ( snake_case_ : datasets.Dataset ,snake_case_ : List[Any] ,snake_case_ : Any ) ->int: '''simple docstring''' with dataset.formatted_as(type=snake_case_ ): for i in range(snake_case_ ): __A : Union[str, Any] = dataset[i] @get_duration def __lowercase ( snake_case_ : datasets.Dataset ,snake_case_ : Any ,snake_case_ : Union[str, Any] ,snake_case_ : Optional[int] ) ->Union[str, Any]: '''simple docstring''' with dataset.formatted_as(type=snake_case_ ): for i in range(0 ,snake_case_ ,snake_case_ ): __A : Dict = dataset[i : i + batch_size] def __lowercase ( ) ->Optional[int]: '''simple docstring''' __A : int = {'''num examples''': SPEED_TEST_N_EXAMPLES} __A : Optional[int] = [ (read, {'''length''': SMALL_TEST}), (read, {'''length''': SPEED_TEST_N_EXAMPLES}), (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}), (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}), (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}), (read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}), (read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}), (read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}), (read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}), (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}), (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}), ] __A : int = [ (read, {'''length''': SMALL_TEST}), (read, {'''length''': SPEED_TEST_N_EXAMPLES}), (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}), (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}), (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}), (read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}), (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}), (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('''generating dataset''' ) __A : Any = datasets.Features( {'''list''': datasets.Sequence(datasets.Value('''float32''' ) ), '''numbers''': datasets.Value('''float32''' )} ) __A : List[Any] = generate_example_dataset( os.path.join(snake_case_ ,'''dataset.arrow''' ) ,snake_case_ ,num_examples=snake_case_ ,seq_shapes={'''list''': (100,)} ,) print('''first set of iterations''' ) for func, kwargs in functions: print(func.__name__ ,str(snake_case_ ) ) __A : Dict = func(snake_case_ ,**snake_case_ ) print('''shuffling dataset''' ) __A : int = dataset.shuffle() print('''Second set of iterations (after shuffling''' ) for func, kwargs in functions_shuffled: print('''shuffled ''' 
,func.__name__ ,str(snake_case_ ) ) __A : Optional[Any] = func( snake_case_ ,**snake_case_ ) with open(snake_case_ ,'''wb''' ) as f: f.write(json.dumps(snake_case_ ).encode('''utf-8''' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
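# The @get_duration decorator imported from `utils` is not shown in this file.
# A minimal stand-in consistent with how it is used above (an assumption, not
# the benchmark's actual implementation) would be:
import functools
import timeit


def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start  # seconds elapsed

    return wrapper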
179
1
"""simple docstring""" import copy import re class __magic_name__ : """simple docstring""" __UpperCamelCase = '''hp''' __UpperCamelCase = {} __UpperCamelCase = None @classmethod def SCREAMING_SNAKE_CASE ( cls :List[str] , snake_case :List[str] , snake_case :str ): '''simple docstring''' A_ : Optional[Any] = prefix A_ : int = defaults cls.build_naming_info() @staticmethod def SCREAMING_SNAKE_CASE ( snake_case :int , snake_case :Optional[Any] ): '''simple docstring''' if len(__SCREAMING_SNAKE_CASE ) == 0: return "" A_ : Dict = None if any(char.isdigit() for char in word ): raise Exception(f"Parameters should not contain numbers: '{word}' contains a number" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 ): A_ : Any = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: A_ : int = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(snake_case :Optional[int] ): A_ : str = "" while integer != 0: A_ : Tuple = chr(ord("A" ) + integer % 10 ) + s integer //= 10 return s A_ : Tuple = 0 while True: A_ : Tuple = word + "#" + int_to_alphabetic(__SCREAMING_SNAKE_CASE ) if sword in info["reverse_short_word"]: continue else: A_ : str = sword break A_ : Union[str, Any] = short_word A_ : List[str] = word return short_word @staticmethod def SCREAMING_SNAKE_CASE ( snake_case :Optional[int] , snake_case :Union[str, Any] ): '''simple docstring''' A_ : Optional[Any] = param_name.split("_" ) A_ : Tuple = [TrialShortNamer.shortname_for_word(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name A_ : Optional[int] = ["", "_"] for separator in separators: A_ : str = separator.join(__SCREAMING_SNAKE_CASE ) if shortname not in info["reverse_short_param"]: A_ : str = shortname A_ : str = param_name return shortname return param_name @staticmethod def SCREAMING_SNAKE_CASE ( snake_case :Union[str, Any] , snake_case :Any ): '''simple docstring''' A_ : str = TrialShortNamer.shortname_for_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) A_ : Dict = short_name A_ : Union[str, Any] = param_name @classmethod def SCREAMING_SNAKE_CASE ( cls :Optional[Any] ): '''simple docstring''' if cls.NAMING_INFO is not None: return A_ : int = { "short_word": {}, "reverse_short_word": {}, "short_param": {}, "reverse_short_param": {}, } A_ : int = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) A_ : Optional[Any] = info @classmethod def SCREAMING_SNAKE_CASE ( cls :Optional[int] , snake_case :List[str] ): '''simple docstring''' cls.build_naming_info() assert cls.PREFIX is not None A_ : str = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f"You should provide a default value for the param name {k} with value {v}" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue A_ : Any = cls.NAMING_INFO["short_param"][k] if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): A_ : str = 1 if v else 0 A_ : int = "" if isinstance(__SCREAMING_SNAKE_CASE , (int, float) ) else "-" A_ : List[str] = f"{key}{sep}{v}" name.append(__SCREAMING_SNAKE_CASE ) return "_".join(__SCREAMING_SNAKE_CASE ) @classmethod def SCREAMING_SNAKE_CASE ( cls :Optional[int] , snake_case :Tuple ): '''simple docstring''' A_ : Any = repr[len(cls.PREFIX ) + 1 :] if repr == 
"": A_ : List[str] = [] else: A_ : List[Any] = repr.split("_" ) A_ : int = {} for value in values: if "-" in value: A_ , A_ : List[Any] = value.split("-" ) else: A_ : Tuple = re.sub("[0-9.]" , "" , __SCREAMING_SNAKE_CASE ) A_ : Optional[Any] = float(re.sub("[^0-9.]" , "" , __SCREAMING_SNAKE_CASE ) ) A_ : Tuple = cls.NAMING_INFO["reverse_short_param"][p_k] A_ : Tuple = p_v for k in cls.DEFAULTS: if k not in parameters: A_ : Dict = cls.DEFAULTS[k] return parameters
363
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
70
0
"""simple docstring""" from itertools import permutations def _lowerCAmelCase ( lowercase_ ): if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False UpperCAmelCase = [7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def _lowerCAmelCase ( lowercase_ = 10 ): return sum( int(''.join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(f'''{solution() = }''')
78
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def lowercase ( A_ , A_ , A_ = False )-> list[float]: '''simple docstring''' if radian_mode: return [magnitude * cos(A_ ), magnitude * sin(A_ )] return [magnitude * cos(radians(A_ ) ), magnitude * sin(radians(A_ ) )] def lowercase ( A_ , A_ , A_ = 10**-1 )-> bool: '''simple docstring''' a : NDArray[floataa] = cross(A_ , A_ ) a : float = sum(A_ ) return abs(A_ ) < eps if __name__ == "__main__": # Test to check if it works __lowercase = array( [ polar_force(7_18.4, 180 - 30), polar_force(8_79.54, 45), polar_force(100, -90), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg __lowercase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
40
0
"""Smoke tests for the Flax example scripts."""

import argparse
import json
import logging
import os
import sys
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, get_gpu_count, slow


SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)

if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_t5_mlm_flax

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
104
"""Greedy activity-selection: print a maximum set of non-overlapping activities."""


def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than or equal to the finish
        # time of the previously selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
104
1
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
90
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
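A small smoke test for the resnet block above, with shapes of my own choosing (a sketch, not part of the source; note that Flax convolutions use NHWC layout):

import jax
import jax.numpy as jnp

block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
x = jnp.ones((1, 8, 8, 32))   # NHWC input
temb = jnp.ones((1, 128))     # time embedding; nn.Dense infers the input width
params = block.init(jax.random.PRNGKey(0), x, temb)
y = block.apply(params, x, temb)
print(y.shape)  # (1, 8, 8, 64) -- the 1x1 shortcut conv matches the residual channels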
50
0
"""Accuracy metric for the Mathematics Aptitude Test of Heuristics (MATH) dataset."""

import math_equivalence  # From: git+https://github.com/hendrycks/math.git

import datasets


_CITATION = """\
@article{hendrycksmath2021,
  title={Measuring Mathematical Problem Solving With the MATH Dataset},
  author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora
          and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt},
  journal={arXiv preprint arXiv:2103.03874},
  year={2021}
}
"""

_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
"""

_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.

Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
        reference is a string that contains natural language
        and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\frac{1}{2}")

Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {'accuracy': 1.0}
"""


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Returns accuracy after canonicalizing both inputs."""
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(ref, pred) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
355
import os
import unittest

from huggingface_hub.utils import are_progress_bars_disabled

import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar


class HfLoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
113
0
"""Integration tests for the ONNX Stable Diffusion inpainting pipeline."""

import unittest

import numpy as np

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
152
"""Tests for the CANINE tokenizer."""

import json
import os
import shutil
import tempfile
import unittest

from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer

    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"

                additional_special_tokens = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`.
                # (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)

    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)

                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer
                # takes into account the new value of additional_special_tokens given in the "tokenizer_config.json"
                # and "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token", tokenizer_without_change_in_init.get_vocab())
                # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2, tokenizer.get_vocab())  # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )

    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(
                    getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id]
                )

    # tokenizer has a fixed vocab_size (namely all possible unicode code points)
    def test_add_tokens_tokenizer(self):
        pass

    # CanineTokenizer does not support do_lower_case = True, as each character has its own Unicode code point
    def test_added_tokens_do_lower_case(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_np_encode_plus_sent_to_model(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_torch_encode_plus_sent_to_model(self):
        pass

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist, so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        pass
152
1
"""Convert an original XLM checkpoint to a transformers-style PyTorch dump."""

import argparse
import json

import numpy
import torch

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
355
"""Generate all k-combinations of the integers 1..n via backtracking."""

from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
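For the driver values above (n = 4, k = 2), the generator returns the six 2-combinations of {1, 2, 3, 4}:

>>> generate_all_combinations(n=4, k=2)
[[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]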
201
0
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
93
"""Project Euler 92: count starting numbers below ten million whose square-digit chain arrives at 89."""

from __future__ import annotations

_lowercase_note = None  # placeholder removed; see DIGITS_SQUARED below

DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared


# There are 2 chains made.
# One ends with 89 with the chain member 58 being the one which, when declared first,
# needs the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element, 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution.
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # number 1 ends at 1
CHAINS[57] = False  # number 58 ends at 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(f"{solution() = }")
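Two short chains make the two endpoints concrete (hand-checked against next_number above):

assert next_number(44) == 32  # 4**2 + 4**2; full chain: 44 -> 32 -> 13 -> 10 -> 1
assert next_number(85) == 89  # 8**2 + 5**2; 89 is the other terminal loop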
93
1
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
351
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
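A usage sketch for the rope_scaling validation above (the values are illustrative):

from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
# GPTNeoXConfig(rope_scaling={"type": "ntk", "factor": 2.0})     # ValueError: type must be 'linear' or 'dynamic'
# GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 1.0})  # ValueError: factor must be a float > 1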
167
0
"""Convert an EfficientFormer checkpoint to the transformers format."""

import argparse
import re
from pathlib import Path

import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

from transformers import (
    EfficientFormerConfig,
    EfficientFormerForImageClassificationWithTeacher,
    EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        hidden, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


# We will verify our results on a COCO image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


def convert_efficientformer_checkpoint(
    checkpoint_path: str, efficientformer_config_file: str, pytorch_dump_path: str, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
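Two hand-traced sanity checks on rename_key, following the branches above (the stage count 5 is an arbitrary illustrative value):

assert rename_key("patch_embed.0.weight", 5) == "efficientformer.patch_embed.convolution1.weight"
assert rename_key("norm.weight", 5) == "efficientformer.layernorm.weight"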
170
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase : Optional[int] =logging.get_logger(__name__) _lowercase : Tuple ={ "microsoft/swinv2-tiny-patch4-window8-256": ( "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json" ), } class snake_case__ (A__ ): """simple docstring""" __lowerCAmelCase :List[Any] = "swinv2" __lowerCAmelCase :List[str] = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , __lowercase=2_2_4 , __lowercase=4 , __lowercase=3 , __lowercase=9_6 , __lowercase=[2, 2, 6, 2] , __lowercase=[3, 6, 1_2, 2_4] , __lowercase=7 , __lowercase=4.0 , __lowercase=True , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.1 , __lowercase="gelu" , __lowercase=False , __lowercase=0.0_2 , __lowercase=1E-5 , __lowercase=3_2 , **__lowercase , ) -> Any: """simple docstring""" super().__init__(**__lowercase ) a__ : Optional[Any] = image_size a__ : Union[str, Any] = patch_size a__ : List[Any] = num_channels a__ : Union[str, Any] = embed_dim a__ : Any = depths a__ : List[str] = len(__lowercase ) a__ : Optional[Any] = num_heads a__ : Union[str, Any] = window_size a__ : Optional[int] = mlp_ratio a__ : List[str] = qkv_bias a__ : Dict = hidden_dropout_prob a__ : str = attention_probs_dropout_prob a__ : List[Any] = drop_path_rate a__ : Tuple = hidden_act a__ : Dict = use_absolute_embeddings a__ : Tuple = layer_norm_eps a__ : Tuple = initializer_range a__ : Union[str, Any] = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model a__ : int = int(embed_dim * 2 ** (len(__lowercase ) - 1) ) a__ : Dict = (0, 0, 0, 0)
170
1
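The model name in the conversion row above is recovered purely from the checkpoint filename; a quick standalone check of that split/join logic (the filename here is hypothetical, chosen only for illustration):

# Hypothetical checkpoint filename; only the split/join logic is being shown.
checkpoint_path = "weights/efficientformer_l1_300d.pth"
model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
assert model_name == "efficientformer_l1"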
'''simple docstring'''

import argparse

import torch

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        '--original_config_file',
        default=None,
        type=str,
        help='The YAML config file corresponding to the original architecture.',
    )
    parser.add_argument(
        '--num_in_channels',
        default=None,
        type=int,
        help='The number of input channels. If `None` number of input channels will be automatically inferred.',
    )
    parser.add_argument(
        '--scheduler_type',
        default='pndm',
        type=str,
        help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
    )
    parser.add_argument(
        '--pipeline_type',
        default=None,
        type=str,
        help=(
            'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
            '. If `None` pipeline will be automatically inferred.'
        ),
    )
    parser.add_argument(
        '--image_size',
        default=None,
        type=int,
        help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
            ' Base. Use 768 for Stable Diffusion v2.'
        ),
    )
    parser.add_argument(
        '--prediction_type',
        default=None,
        type=str,
        help=(
            'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
            ' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
        ),
    )
    parser.add_argument(
        '--extract_ema',
        action='store_true',
        help=(
            'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
            ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
            ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
        ),
    )
    parser.add_argument(
        '--upcast_attention',
        action='store_true',
        help=(
            'Whether the attention computation should always be upcasted. This is necessary when running stable'
            ' diffusion 2.1.'
        ),
    )
    parser.add_argument(
        '--from_safetensors',
        action='store_true',
        help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
    )
    parser.add_argument(
        '--to_safetensors',
        action='store_true',
        help='Whether to store pipeline in safetensors format or not.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    parser.add_argument(
        '--stable_unclip',
        type=str,
        default=None,
        required=False,
        help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
    )
    parser.add_argument(
        '--stable_unclip_prior',
        type=str,
        default=None,
        required=False,
        help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
    )
    parser.add_argument(
        '--clip_stats_path',
        type=str,
        help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
        required=False,
    )
    parser.add_argument(
        '--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
    )
    parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
    parser.add_argument(
        '--vae_path',
        type=str,
        default=None,
        required=False,
        help='Set to a path, hub id to an already converted vae to not convert it again.',
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
352
'''simple docstring''' import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class A : def __init__( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any]=99 , lowerCAmelCase_ : Optional[int]=13 , lowerCAmelCase_ : Tuple=16 , lowerCAmelCase_ : Optional[int]=7 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Any=32 , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Optional[Any]=30 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : Union[str, Any]=None , ) -> Any: """simple docstring""" _a = parent _a = batch_size _a = decoder_seq_length # For common tests _a = self.decoder_seq_length _a = is_training _a = use_attention_mask _a = use_labels _a = vocab_size _a = d_model _a = d_model _a = decoder_layers _a = decoder_layers _a = decoder_ffn_dim _a = decoder_attention_heads _a = decoder_attention_heads _a = eos_token_id _a = bos_token_id _a = pad_token_id _a = decoder_start_token_id _a = use_cache _a = max_position_embeddings _a = None _a = decoder_seq_length _a = 2 _a = 1 def __lowerCAmelCase ( self : str ) -> str: """simple docstring""" _a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _a = None if self.use_attention_mask: _a = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) _a = None if self.use_labels: _a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _a = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , ) -> int: """simple docstring""" _a = True _a = TrOCRDecoder(config=lowerCAmelCase_ ).to(lowerCAmelCase_ ).eval() _a = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass _a = model(lowerCAmelCase_ , use_cache=lowerCAmelCase_ ) _a = model(lowerCAmelCase_ ) _a = model(lowerCAmelCase_ , use_cache=lowerCAmelCase_ ) self.parent.assertTrue(len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) ) self.parent.assertTrue(len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) + 1 ) _a = outputs['''past_key_values'''] # create hypothetical next token and extent to next_input_ids _a = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and _a = torch.cat([input_ids, next_tokens] , dim=-1 ) _a = model(lowerCAmelCase_ )['''last_hidden_state'''] _a = model(lowerCAmelCase_ , 
past_key_values=lowerCAmelCase_ )['''last_hidden_state'''] # select random slice _a = ids_tensor((1,) , output_from_past.shape[-1] ).item() _a = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() _a = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ) def __lowerCAmelCase ( self : int ) -> List[str]: """simple docstring""" _a = self.prepare_config_and_inputs() _a , _a , _a , _a = config_and_inputs _a = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_torch class A ( _a ,_a ,_a ,unittest.TestCase ): lowercase_ = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowercase_ = (TrOCRForCausalLM,) if is_torch_available() else () lowercase_ = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {} lowercase_ = True lowercase_ = False def __lowerCAmelCase ( self : List[str] ) -> str: """simple docstring""" _a = TrOCRStandaloneDecoderModelTester(self , is_training=lowerCAmelCase_ ) _a = ConfigTester(self , config_class=lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[str] ) -> Any: """simple docstring""" pass def __lowerCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" pass def __lowerCAmelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" pass def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" return @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" pass
179
0
"""simple docstring""" def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or number < 0: raise ValueError("""Input must be a non-negative integer""" ) _a : Optional[Any] = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
294
"""simple docstring""" from __future__ import annotations def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' print(F"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(UpperCamelCase__ ): print(F"""{i}\t\t{d}""" ) def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for j in range(UpperCamelCase__ ): _a , _a , _a : List[str] = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Dict = [float("""inf""" )] * vertex_count _a : Any = 0.0 for _ in range(vertex_count - 1 ): for j in range(UpperCamelCase__ ): _a , _a , _a : List[Any] = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: _a : Any = distance[u] + w _a : Union[str, Any] = check_negative_cycle(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() _snake_case = int(input('Enter number of vertices: ').strip()) _snake_case = int(input('Enter number of edges: ').strip()) _snake_case = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) _snake_case , _snake_case , _snake_case = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) _snake_case = {'src': src, 'dst': dest, 'weight': weight} _snake_case = int(input('\nEnter shortest path source:').strip()) _snake_case = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
294
1
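A minimal standalone sketch of the Kernighan bit-counting trick used in the row above; `0b101101` is an arbitrary test value:

def count_set_bits(number: int) -> int:
    # n & (n - 1) clears the lowest set bit, so the loop body
    # runs once per 1-bit rather than once per bit position.
    count = 0
    while number:
        number &= number - 1
        count += 1
    return count


assert count_set_bits(0b101101) == 4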
"""simple docstring""" import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def __lowerCamelCase ( __UpperCamelCase="" ) -> str: """simple docstring""" lowerCAmelCase_ : Dict = tempfile.mkdtemp() return os.path.join(__UpperCamelCase , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase ( self : Any ): lowerCAmelCase_ : Union[str, Any] = torch.rand(12 , dtype=torch.floataa ) - 0.5 lowerCAmelCase_ : int = AgentAudio(a_ ) lowerCAmelCase_ : Tuple = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(a_ , agent_type.to_raw() , atol=1e-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(a_ ) ) # Ensure that the file contains the same value as the original tensor lowerCAmelCase_ , lowerCAmelCase_ : str = sf.read(a_ ) self.assertTrue(torch.allclose(a_ , torch.tensor(a_ ) , atol=1e-4 ) ) def lowerCamelCase ( self : Tuple ): lowerCAmelCase_ : Optional[Any] = torch.rand(12 , dtype=torch.floataa ) - 0.5 lowerCAmelCase_ : int = get_new_path(suffix=".wav" ) sf.write(a_ , a_ , 1_60_00 ) lowerCAmelCase_ : List[Any] = AgentAudio(a_ ) self.assertTrue(torch.allclose(a_ , agent_type.to_raw() , atol=1e-4 ) ) self.assertEqual(agent_type.to_string() , a_ ) @require_vision @require_torch class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase ( self : Dict ): lowerCAmelCase_ : List[str] = torch.randint(0 , 2_56 , (64, 64, 3) ) lowerCAmelCase_ : Tuple = AgentImage(a_ ) lowerCAmelCase_ : Optional[Any] = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(a_ , agent_type._tensor , atol=1e-4 ) ) self.assertIsInstance(agent_type.to_raw() , Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(a_ ) ) def lowerCamelCase ( self : Dict ): lowerCAmelCase_ : Optional[Any] = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" lowerCAmelCase_ : Optional[int] = Image.open(a_ ) lowerCAmelCase_ : Any = AgentImage(a_ ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(a_ ) ) def lowerCamelCase ( self : Optional[Any] ): lowerCAmelCase_ : int = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" lowerCAmelCase_ : List[str] = Image.open(a_ ) lowerCAmelCase_ : Any = AgentImage(a_ ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(a_ ) ) class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase ( self : Optional[int] ): lowerCAmelCase_ : List[Any] = "Hey!" 
lowerCAmelCase_ : Any = AgentText(a_ ) self.assertEqual(a_ , agent_type.to_string() ) self.assertEqual(a_ , agent_type.to_raw() ) self.assertEqual(a_ , a_ )
161
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def __lowerCamelCase ( __UpperCamelCase ) -> np.ndarray: """simple docstring""" lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b def __lowerCamelCase ( __UpperCamelCase ) -> np.ndarray: """simple docstring""" return (gray > 127) & (gray <= 255) def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> np.ndarray: """simple docstring""" lowerCAmelCase_ : List[str] = np.zeros_like(__UpperCamelCase ) lowerCAmelCase_ : Dict = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image lowerCAmelCase_ : List[Any] = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): lowerCAmelCase_ : List[str] = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() lowerCAmelCase_ : int = int(summation > 0 ) return output if __name__ == "__main__": # read original image lowercase__ = Path(__file__).resolve().parent / """image_data""" / """lena.jpg""" lowercase__ = np.array(Image.open(lena_path)) # kernel to be applied lowercase__ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) lowercase__ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image lowercase__ = Image.fromarray(output).convert("""RGB""") pil_img.save("""result_dilation.png""")
161
1
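To see the dilation from the row above in action, here is a tiny hand-checkable run; it uses centre-anchored np.pad for symmetry, a slight variation on the top-left padding in the function above:

import numpy as np

image = np.zeros((3, 3), dtype=int)
image[1, 1] = 1  # a single foreground pixel
kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
padded = np.pad(image, 1)  # zero padding, one pixel on every side
result = np.array(
    [
        [int((kernel * padded[y : y + 3, x : x + 3]).sum() > 0) for x in range(3)]
        for y in range(3)
    ]
)
print(result)  # the single pixel has grown into a centred cross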
"""simple docstring""" import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin _lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right _lowercase = 25_60_47 _lowercase = 25_61_45 @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = NllbTokenizer _lowerCamelCase: Dict = NllbTokenizerFast _lowerCamelCase: str = True _lowerCamelCase: int = True _lowerCamelCase: str = {} def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: super().setUp() # We have a SentencePiece fixture for testing A = NllbTokenizer(A_ ,keep_accents=A_ ) tokenizer.save_pretrained(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = NllbTokenizer(A_ ,keep_accents=A_ ) A = tokenizer.tokenize('This is a test' ) self.assertListEqual(A_ ,['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A_ ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,) A = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( A_ ,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] ,) A = tokenizer.convert_tokens_to_ids(A_ ) self.assertListEqual( A_ ,[ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] ,) A = tokenizer.convert_ids_to_tokens(A_ ) self.assertListEqual( A_ ,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] ,) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: A = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A = self.rust_tokenizer_class.from_pretrained(A_ ,**A_ ) A = self.tokenizer_class.from_pretrained(A_ ,**A_ ) A = tempfile.mkdtemp() A = tokenizer_r.save_pretrained(A_ ) A = tokenizer_p.save_pretrained(A_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) A = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(A_ ,A_ ) # Checks everything loads correctly in the same way A = tokenizer_r.from_pretrained(A_ ) A = tokenizer_p.from_pretrained(A_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A_ ,A_ ) ) shutil.rmtree(A_ ) # Save tokenizer rust, legacy_format=True A = tempfile.mkdtemp() A = tokenizer_r.save_pretrained(A_ ,legacy_format=A_ ) A = 
tokenizer_p.save_pretrained(A_ ) # Checks it save with the same files self.assertSequenceEqual(A_ ,A_ ) # Checks everything loads correctly in the same way A = tokenizer_r.from_pretrained(A_ ) A = tokenizer_p.from_pretrained(A_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A_ ,A_ ) ) shutil.rmtree(A_ ) # Save tokenizer rust, legacy_format=False A = tempfile.mkdtemp() A = tokenizer_r.save_pretrained(A_ ,legacy_format=A_ ) A = tokenizer_p.save_pretrained(A_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way A = tokenizer_r.from_pretrained(A_ ) A = tokenizer_p.from_pretrained(A_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A_ ,A_ ) ) shutil.rmtree(A_ ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: if not self.test_seqaseq: return A = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Longer text that will definitely require truncation. A = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for' ' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons' ' will only worsen the violence and misery for millions of people.', ] A = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al' ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi' ' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] try: A = tokenizer.prepare_seqaseq_batch( src_texts=A_ ,tgt_texts=A_ ,max_length=3 ,max_target_length=10 ,return_tensors='pt' ,src_lang='eng_Latn' ,tgt_lang='ron_Latn' ,) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.labels.shape[1] ,10 ) # max_target_length will default to max_length if not specified A = tokenizer.prepare_seqaseq_batch( A_ ,tgt_texts=A_ ,max_length=3 ,return_tensors='pt' ) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.labels.shape[1] ,3 ) A = tokenizer.prepare_seqaseq_batch( src_texts=A_ ,max_length=3 ,max_target_length=10 ,return_tensors='pt' ) self.assertEqual(batch_encoder_only.input_ids.shape[1] ,3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] ,3 ) self.assertNotIn('decoder_input_ids' ,A_ ) @unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' 
) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: pass def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A = [AddedToken('<special>' ,lstrip=A_ )] A = self.rust_tokenizer_class.from_pretrained( A_ ,additional_special_tokens=A_ ,**A_ ) A = tokenizer_r.encode('Hey this is a <special> token' ) A = tokenizer_r.encode('<special>' ,add_special_tokens=A_ )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: A = self.rust_tokenizer_class.from_pretrained( A_ ,additional_special_tokens=A_ ,**A_ ,) A = self.tokenizer_class.from_pretrained( A_ ,additional_special_tokens=A_ ,**A_ ) A = tokenizer_p.encode('Hey this is a <special> token' ) A = tokenizer_cr.encode('Hey this is a <special> token' ) self.assertEqual(A_ ,A_ ) self.assertEqual(A_ ,A_ ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''facebook/nllb-200-distilled-600M''' _lowerCamelCase: Any = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''', ] _lowerCamelCase: Dict = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei''' ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor''' ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] _lowerCamelCase: List[str] = [ 256047, 16297, 134408, 8165, 248066, 14734, 950, 1135, 105721, 3573, 83, 27352, 108, 49486, 2, ] @classmethod def _SCREAMING_SNAKE_CASE ( cls : int ) -> List[str]: A = NllbTokenizer.from_pretrained( cls.checkpoint_name ,src_lang='eng_Latn' ,tgt_lang='ron_Latn' ) A = 1 return cls def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] ,25_6001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] ,25_6002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] ,25_6057 ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: self.assertIn(A_ ,self.tokenizer.all_special_ids ) # fmt: off A = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047] # fmt: on A = self.tokenizer.decode(A_ ,skip_special_tokens=A_ ) A = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=A_ ) self.assertEqual(A_ ,A_ ) self.assertNotIn(self.tokenizer.eos_token ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: A = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] ,A_ ) A = 10 A = self.tokenizer(A_ ,max_length=A_ ,truncation=A_ ).input_ids[0] self.assertEqual(ids[-1] ,2 ) self.assertEqual(ids[0] ,A_ ) self.assertEqual(len(A_ ) ,A_ ) def 
_SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) ,[25_6203, 3] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: A = tempfile.mkdtemp() A = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(A_ ) A = NllbTokenizer.from_pretrained(A_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,A_ ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = self.tokenizer( self.src_text ,text_target=self.tgt_text ,padding=A_ ,truncation=A_ ,max_length=len(self.expected_src_tokens ) ,return_tensors='pt' ,) A = shift_tokens_right( batch['labels'] ,self.tokenizer.pad_token_id ,self.tokenizer.lang_code_to_id['ron_Latn'] ) self.assertIsInstance(A_ ,A_ ) self.assertEqual((2, 15) ,batch.input_ids.shape ) self.assertEqual((2, 15) ,batch.attention_mask.shape ) A = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens ,A_ ) self.assertEqual(A_ ,batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = self.tokenizer(self.src_text ,padding=A_ ,truncation=A_ ,max_length=3 ,return_tensors='pt' ) A = self.tokenizer( text_target=self.tgt_text ,padding=A_ ,truncation=A_ ,max_length=10 ,return_tensors='pt' ) A = targets['input_ids'] A = shift_tokens_right( A_ ,self.tokenizer.pad_token_id ,decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] ,) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.decoder_input_ids.shape[1] ,10 ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: A = self.tokenizer._build_translation_inputs( 'A test' ,return_tensors='pt' ,src_lang='eng_Latn' ,tgt_lang='fra_Latn' ) self.assertEqual( nested_simplify(A_ ) ,{ # A, test, EOS, en_XX 'input_ids': [[25_6047, 70, 7356, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 25_6057, } ,) @require_torch def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: A = True A = self.tokenizer( 'UN Chief says there is no military solution in Syria' ,src_lang='eng_Latn' ,tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids ,[1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] ) A = False A = self.tokenizer( 'UN Chief says there is no military solution in Syria' ,src_lang='eng_Latn' ,tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids ,[25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
74
"""simple docstring""" from __future__ import annotations import requests def _snake_case ( snake_case__ : str ): A = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty' return requests.get(snake_case__ ).json() def _snake_case ( snake_case__ : int = 10 ): A = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty' A = requests.get(snake_case__ ).json()[:max_stories] return [get_hackernews_story(snake_case__ ) for story_id in story_ids] def _snake_case ( snake_case__ : int = 10 ): A = hackernews_top_stories(snake_case__ ) return "\n".join('* [{title}]({url})'.format(**snake_case__ ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
74
1
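The markdown line in the row above comes from str.format(**story), which simply ignores unused keys in the story dict; a quick check with hypothetical story values:

story = {"title": "Example", "url": "https://example.com", "score": 100}
line = "* [{title}]({url})".format(**story)
assert line == "* [Example](https://example.com)"  # extra keys such as "score" are ignored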
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    normal_gradient = point_y / 4 / point_x
    # sin(2*theta) and cos(2*theta) for a normal of gradient m
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"""{solution() = }""")
363
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( DiffusionPipeline, UnCLIPImageVariationPipeline, UnCLIPScheduler, UNetaDConditionModel, UNetaDModel, ) from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__ ( __magic_name__ , unittest.TestCase ): lowercase = UnCLIPImageVariationPipeline lowercase = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'} lowercase = IMAGE_VARIATION_BATCH_PARAMS lowercase = [ 'generator', 'return_dict', 'decoder_num_inference_steps', 'super_res_num_inference_steps', ] lowercase = False @property def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' return 32 @property def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' return 32 @property def _lowerCamelCase ( self : int ): '''simple docstring''' return self.time_input_dim @property def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' return self.time_input_dim * 4 @property def _lowerCamelCase ( self : Any ): '''simple docstring''' return 100 @property def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def _lowerCamelCase ( self : Any ): '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase__ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModelWithProjection(a ) @property def _lowerCamelCase ( self : List[str] ): '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase__ : List[Any] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) return CLIPVisionModelWithProjection(a ) @property def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase__ : Union[str, Any] = { 'clip_embeddings_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'cross_attention_dim': self.cross_attention_dim, } lowerCAmelCase__ : Optional[Any] = UnCLIPTextProjModel(**a ) return model @property def _lowerCamelCase ( self : List[str] ): '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase__ : str = { 'sample_size': 32, # RGB in channels 'in_channels': 3, # Out channels is double in channels because predicts mean and variance 'out_channels': 6, 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 
'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': 'identity', } lowerCAmelCase__ : str = UNetaDConditionModel(**a ) return model @property def _lowerCamelCase ( self : str ): '''simple docstring''' return { "sample_size": 64, "layers_per_block": 1, "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"), "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"), "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "in_channels": 6, "out_channels": 3, } @property def _lowerCamelCase ( self : str ): '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs ) return model @property def _lowerCamelCase ( self : int ): '''simple docstring''' torch.manual_seed(1 ) lowerCAmelCase__ : List[str] = UNetaDModel(**self.dummy_super_res_kwargs ) return model def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = self.dummy_decoder lowerCAmelCase__ : Optional[int] = self.dummy_text_proj lowerCAmelCase__ : Any = self.dummy_text_encoder lowerCAmelCase__ : Any = self.dummy_tokenizer lowerCAmelCase__ : Any = self.dummy_super_res_first lowerCAmelCase__ : Optional[int] = self.dummy_super_res_last lowerCAmelCase__ : Dict = UnCLIPScheduler( variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , ) lowerCAmelCase__ : Any = UnCLIPScheduler( variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , ) lowerCAmelCase__ : Any = CLIPImageProcessor(crop_size=32 , size=32 ) lowerCAmelCase__ : Optional[int] = self.dummy_image_encoder return { "decoder": decoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_proj": text_proj, "feature_extractor": feature_extractor, "image_encoder": image_encoder, "super_res_first": super_res_first, "super_res_last": super_res_last, "decoder_scheduler": decoder_scheduler, "super_res_scheduler": super_res_scheduler, } def _lowerCamelCase ( self : Any , a : Dict , a : List[str]=0 , a : List[str]=True ): '''simple docstring''' lowerCAmelCase__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a ) if str(a ).startswith('mps' ): lowerCAmelCase__ : Optional[int] = torch.manual_seed(a ) else: lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a ) if pil_image: lowerCAmelCase__ : Optional[int] = input_image * 0.5 + 0.5 lowerCAmelCase__ : Dict = input_image.clamp(0 , 1 ) lowerCAmelCase__ : List[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() lowerCAmelCase__ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(a )[0] return { "image": input_image, "generator": generator, "decoder_num_inference_steps": 2, "super_res_num_inference_steps": 2, "output_type": "np", } def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = 'cpu' lowerCAmelCase__ : Any = self.get_dummy_components() lowerCAmelCase__ : List[str] = self.pipeline_class(**a ) lowerCAmelCase__ : Dict = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : Dict = self.get_dummy_inputs(a , pil_image=a ) lowerCAmelCase__ : str = pipe(**a ) lowerCAmelCase__ : Optional[Any] = output.images lowerCAmelCase__ : str = self.get_dummy_inputs(a , pil_image=a ) lowerCAmelCase__ : Optional[int] = pipe( **a , return_dict=a , )[0] lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1] lowerCAmelCase__ : Tuple = 
image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ : List[str] = np.array( [ 0.9_9_9_7, 0.0_0_0_2, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_6_9, 0.0_0_2_3, 0.9_9_9_7, 0.9_9_6_9, 0.9_9_7_0, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = 'cpu' lowerCAmelCase__ : Dict = self.get_dummy_components() lowerCAmelCase__ : Optional[int] = self.pipeline_class(**a ) lowerCAmelCase__ : int = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a ) lowerCAmelCase__ : List[str] = pipe(**a ) lowerCAmelCase__ : Union[str, Any] = output.images lowerCAmelCase__ : int = self.get_dummy_inputs(a , pil_image=a ) lowerCAmelCase__ : int = pipe( **a , return_dict=a , )[0] lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1] lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Tuple = 'cpu' lowerCAmelCase__ : int = self.get_dummy_components() lowerCAmelCase__ : Tuple = self.pipeline_class(**a ) lowerCAmelCase__ : Union[str, Any] = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a , pil_image=a ) lowerCAmelCase__ : List[str] = [ pipeline_inputs['image'], pipeline_inputs['image'], ] lowerCAmelCase__ : Optional[int] = pipe(**a ) lowerCAmelCase__ : Tuple = output.images lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a , pil_image=a ) lowerCAmelCase__ : Union[str, Any] = [ tuple_pipeline_inputs['image'], tuple_pipeline_inputs['image'], ] lowerCAmelCase__ : str = pipe( **a , return_dict=a , )[0] lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1] lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (2, 64, 64, 3) lowerCAmelCase__ : Union[str, Any] = np.array( [ 0.9_9_9_7, 0.9_9_8_9, 0.0_0_0_8, 0.0_0_2_1, 0.9_9_6_0, 0.0_0_1_8, 0.0_0_1_4, 0.0_0_0_2, 0.9_9_3_3, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : Tuple = torch.device('cpu' ) class A__ : lowercase = 1 lowerCAmelCase__ : Optional[Any] = self.get_dummy_components() lowerCAmelCase__ : Dict = self.pipeline_class(**a ) lowerCAmelCase__ : Optional[Any] = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 ) lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype lowerCAmelCase__ : Union[str, Any] = 1 lowerCAmelCase__ : str = ( batch_size, pipe.decoder.config.in_channels, pipe.decoder.config.sample_size, pipe.decoder.config.sample_size, ) lowerCAmelCase__ : List[Any] = pipe.prepare_latents( a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() ) lowerCAmelCase__ : List[str] = ( batch_size, pipe.super_res_first.config.in_channels // 2, pipe.super_res_first.config.sample_size, 
pipe.super_res_first.config.sample_size, ) lowerCAmelCase__ : Any = pipe.prepare_latents( a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() ) lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a ) lowerCAmelCase__ : Optional[int] = pipe( **a , decoder_latents=a , super_res_latents=a ).images lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(a , pil_image=a ) # Don't pass image, instead pass embedding lowerCAmelCase__ : Union[str, Any] = pipeline_inputs.pop('image' ) lowerCAmelCase__ : Union[str, Any] = pipe.image_encoder(a ).image_embeds lowerCAmelCase__ : List[Any] = pipe( **a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images # make sure passing text embeddings manually is identical assert np.abs(img_out_a - img_out_a ).max() < 1E-4 @skip_mps def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : Tuple = torch_device == 'cpu' # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor lowerCAmelCase__ : int = 1E-2 self._test_attention_slicing_forward_pass( test_max_difference=a , expected_max_diff=a ) @skip_mps def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu' lowerCAmelCase__ : Any = True lowerCAmelCase__ : Optional[Any] = [ 'decoder_num_inference_steps', 'super_res_num_inference_steps', ] self._test_inference_batch_single_identical( test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Tuple = [ 'decoder_num_inference_steps', 'super_res_num_inference_steps', ] if torch_device == "mps": # TODO: MPS errors with larger batch sizes lowerCAmelCase__ : List[str] = [2, 3] self._test_inference_batch_consistent( batch_sizes=a , additional_params_copy_to_batched_inputs=a , ) else: self._test_inference_batch_consistent( additional_params_copy_to_batched_inputs=a ) @skip_mps def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' return super().test_dict_tuple_outputs_equivalent() @skip_mps def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' return super().test_save_load_local() @skip_mps def _lowerCamelCase ( self : str ): '''simple docstring''' return super().test_save_load_optional_components() @slow @require_torch_gpu class A__ ( unittest.TestCase ): def _lowerCamelCase ( self : Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Dict = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' ) lowerCAmelCase__ : List[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/unclip/karlo_v1_alpha_cat_variation_fp16.npy' ) lowerCAmelCase__ : Tuple = UnCLIPImageVariationPipeline.from_pretrained( 'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa ) lowerCAmelCase__ : Union[str, Any] = pipeline.to(a ) pipeline.set_progress_bar_config(disable=a ) lowerCAmelCase__ : Dict = torch.Generator(device='cpu' ).manual_seed(0 ) lowerCAmelCase__ : List[str] = pipeline( a , generator=a , output_type='np' , ) lowerCAmelCase__ : Union[str, Any] = output.images[0] assert image.shape == (256, 256, 3) assert_mean_pixel_difference(a , a , 15 )
307
0
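In the laser-reflection row above, sa and ca are sin 2θ and cos 2θ for a normal of gradient m, via the double-angle forms 2m/(1 + m²) and (1 - m²)/(1 + m²), and every bounce point must lie on the ellipse 4x² + y² = 100. A quick numerical check of the stated entry point:

# The beam first hits the ellipse 4x^2 + y^2 = 100 at (1.4, -9.6).
x, y = 1.4, -9.6
assert abs(4 * x * x + y * y - 100) < 1e-9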
'''simple docstring''' import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def __snake_case ( ): raise RuntimeError("CUDA out of memory." ) class snake_case ( nn.Module ): """simple docstring""" def __init__( self ): """simple docstring""" super().__init__() lowerCamelCase_ = nn.Linear(3 , 4 ) lowerCamelCase_ = nn.BatchNormad(4 ) lowerCamelCase_ = nn.Linear(4 , 5 ) def snake_case ( self , UpperCamelCase ): """simple docstring""" return self.lineara(self.batchnorm(self.lineara(UpperCamelCase ) ) ) class snake_case ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" lowerCamelCase_ = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(UpperCamelCase ): nonlocal batch_sizes batch_sizes.append(UpperCamelCase ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(UpperCamelCase , [128, 64, 32, 16, 8] ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(UpperCamelCase , UpperCamelCase ): nonlocal batch_sizes batch_sizes.append(UpperCamelCase ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga lowerCamelCase_ ,lowerCamelCase_ = mock_training_loop_function("hello" ) self.assertListEqual(UpperCamelCase , [128, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, "hello"] ) def snake_case ( self ): """simple docstring""" @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(UpperCamelCase ): pass with self.assertRaises(UpperCamelCase ) as cm: mock_training_loop_function() self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] ) def snake_case ( self ): """simple docstring""" @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(UpperCamelCase ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(UpperCamelCase ) as cm: mock_training_loop_function() self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] ) def snake_case ( self ): """simple docstring""" @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(UpperCamelCase , UpperCamelCase , UpperCamelCase ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(UpperCamelCase ) as cm: mock_training_loop_function(128 , "hello" , "world" ) self.assertIn("Batch size was passed into `f`" , cm.exception.args[0] ) self.assertIn("`f(arg1='hello', arg2='world')" , cm.exception.args[0] ) def snake_case ( self ): """simple docstring""" @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(UpperCamelCase ): raise ValueError("Oops, we had an error!" ) with self.assertRaises(UpperCamelCase ) as cm: mock_training_loop_function() self.assertIn("Oops, we had an error!" , cm.exception.args[0] ) @require_cuda def snake_case ( self ): """simple docstring""" lowerCamelCase_ = torch.cuda.memory_allocated() lowerCamelCase_ = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , UpperCamelCase ) lowerCamelCase_ = release_memory(UpperCamelCase ) self.assertEqual(torch.cuda.memory_allocated() , UpperCamelCase )
55
"""simple docstring""" from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=_a) class UpperCAmelCase_ ( _a): lowerCamelCase__ : str = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True}) lowerCamelCase__ : ClassVar[Features] = Features({"text": Value("string")}) lowerCamelCase__ : ClassVar[Features] = Features({}) lowerCamelCase__ : str = "text" @property def _UpperCAmelCase ( self ) -> Dict[str, str]: return {self.text_column: "text"}
77
0
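The test class in the row above exercises a retrying decorator; the following is a conceptual sketch of the behaviour those tests expect, not the accelerate implementation itself:

def find_executable_batch_size_sketch(starting_batch_size=128):
    # Retry the wrapped function, halving the batch size whenever it raises
    # an out-of-memory RuntimeError, until it succeeds or reaches zero.
    def decorator(function):
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return function(batch_size, *args, **kwargs)
                except RuntimeError as error:
                    if "out of memory" not in str(error):
                        raise
                    batch_size //= 2
            raise RuntimeError("No executable batch size found, reached zero.")
        return wrapper
    return decorator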
"""simple docstring""" def _snake_case ( lowerCamelCase__ : int ) -> int: assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), F"""The input value of [n={number}] is not an integer""" if number == 1: return 2 elif number < 1: lowerCamelCase_ : Tuple =F"""The input value of [n={number}] has to be > 0""" raise ValueError(lowerCamelCase__ ) else: lowerCamelCase_ : int =sylvester(number - 1 ) lowerCamelCase_ : Optional[int] =num - 1 lowerCamelCase_ : Optional[int] =num return lower * upper + 1 if __name__ == "__main__": print(f'The 8th number in Sylvester\'s sequence: {sylvester(8)}')
209
"""simple docstring""" import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def _snake_case ( lowerCamelCase__ : Any ) -> Union[str, Any]: # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def _snake_case ( ) -> List[Any]: with parallel_backend("spark" ): assert ParallelBackendConfig.backend_name == "spark" lowerCamelCase_ : Optional[Any] =[1, 2, 3] with pytest.raises(lowerCamelCase__ ): with parallel_backend("unsupported backend" ): map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=2 ) with pytest.raises(lowerCamelCase__ ): with parallel_backend("unsupported backend" ): map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("num_proc" , [2, -1] ) def _snake_case ( lowerCamelCase__ : Tuple ) -> Optional[Any]: lowerCamelCase_ : str =[1, 2] lowerCamelCase_ : List[str] ={"a": 1, "b": 2} lowerCamelCase_ : List[str] ={"a": [1, 2], "b": [3, 4]} lowerCamelCase_ : Optional[int] ={"a": {"1": 1}, "b": 2} lowerCamelCase_ : int ={"a": 1, "b": 2, "c": 3, "d": 4} lowerCamelCase_ : Optional[int] =[2, 3] lowerCamelCase_ : List[Any] ={"a": 2, "b": 3} lowerCamelCase_ : int ={"a": [2, 3], "b": [4, 5]} lowerCamelCase_ : str ={"a": {"1": 2}, "b": 3} lowerCamelCase_ : Dict ={"a": 2, "b": 3, "c": 4, "d": 5} with parallel_backend("spark" ): assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa
209
1
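The recursion in the row above implements Sylvester's sequence, a(1) = 2 and a(n) = a(n-1)² - a(n-1) + 1; an equivalent iterative sketch with its first terms:

def sylvester_iterative(n: int) -> int:
    value = 2
    for _ in range(n - 1):
        value = value * value - value + 1  # a(n) = a(n-1)^2 - a(n-1) + 1
    return value


assert [sylvester_iterative(i) for i in range(1, 5)] == [2, 3, 7, 43]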
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class _lowerCamelCase:
    lowercase_ : Optional[Union[str, Path]] = None
    lowercase_ : bool = False
    lowercase_ : bool = False
    lowercase_ : bool = False
    lowercase_ : Optional[Dict] = None
    lowercase_ : Optional[str] = None
    lowercase_ : bool = False
    lowercase_ : bool = False
    lowercase_ : bool = False
    lowercase_ : bool = True
    lowercase_ : Optional[int] = None
    lowercase_ : int = 1
    lowercase_ : Optional[Union[str, bool]] = None
    lowercase_ : bool = False
    lowercase_ : Optional[Dict] = None
    lowercase_ : Optional[str] = None

    def UpperCamelCase(self) -> "DownloadConfig":
        """simple docstring"""
        # deep-copy each attribute value, not the class reference
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
21
'''simple docstring'''

from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    if len(nums) == 0:
        raise ValueError('''find_max() arg is an empty sequence''')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('''list index out of range''')
    if left == right:
        return nums[left]

    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
23
0
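A quick usage sketch of the divide-and-conquer recursion above: each call splits [left, right] at the midpoint and keeps the larger of the two recursive maxima, so the whole array is covered by n leaves of the call tree.

nums = [3, 7, 1, 9, 4]
assert find_max(nums, 0, len(nums) - 1) == 9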
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _snake_case : def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ): __magic_name__ : Optional[int] = parent __magic_name__ : Any = batch_size __magic_name__ : Union[str, Any] = seq_length __magic_name__ : str = is_training __magic_name__ : Union[str, Any] = use_token_type_ids __magic_name__ : List[Any] = use_labels __magic_name__ : List[Any] = vocab_size __magic_name__ : Optional[Any] = hidden_size __magic_name__ : Optional[Any] = num_hidden_layers __magic_name__ : Dict = num_attention_heads __magic_name__ : List[Any] = intermediate_size __magic_name__ : str = hidden_act __magic_name__ : str = hidden_dropout_prob __magic_name__ : Any = attention_probs_dropout_prob __magic_name__ : int = max_position_embeddings __magic_name__ : Any = type_vocab_size __magic_name__ : List[Any] = type_sequence_label_size __magic_name__ : int = initializer_range __magic_name__ : Optional[Any] = num_labels __magic_name__ : Optional[Any] = num_choices __magic_name__ : List[str] = scope __magic_name__ : str = self.vocab_size - 1 def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : List[Any] = None if self.use_token_type_ids: __magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : Dict = None __magic_name__ : Tuple = None __magic_name__ : List[str] = None if self.use_labels: __magic_name__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : int = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Optional[int] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) __magic_name__ : int = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , *_a ): __magic_name__ : Optional[Any] = OpenAIGPTModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __magic_name__ : Optional[Any] = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , head_mask=__lowerCAmelCase ) __magic_name__ : Union[str, Any] = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) __magic_name__ : str = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , *_a ): __magic_name__ : int = OpenAIGPTLMHeadModel(__lowerCAmelCase ) 
model.to(__lowerCAmelCase ) model.eval() __magic_name__ : List[str] = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , *_a ): __magic_name__ : List[Any] = OpenAIGPTDoubleHeadsModel(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __magic_name__ : List[str] = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , *_a ): __magic_name__ : Dict = self.num_labels __magic_name__ : List[Any] = OpenAIGPTForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __magic_name__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Optional[int] = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : List[str] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : List[str] = config_and_inputs __magic_name__ : Any = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class _snake_case ( snake_case , snake_case , snake_case , unittest.TestCase ): UpperCamelCase__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) UpperCamelCase__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly UpperCamelCase__ = ( { 'feature-extraction': OpenAIGPTModel, 'text-classification': OpenAIGPTForSequenceClassification, 'text-generation': OpenAIGPTLMHeadModel, 'zero-shot': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def SCREAMING_SNAKE_CASE ( self , _a , _a , _a=False ): __magic_name__ : List[str] = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": __magic_name__ : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase , ) __magic_name__ : List[str] = inputs_dict["labels"] __magic_name__ : Dict = inputs_dict["labels"] __magic_name__ : List[str] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCAmelCase , ) __magic_name__ : List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : int = OpenAIGPTModelTester(self ) __magic_name__ : Tuple = ConfigTester(self , config_class=__lowerCAmelCase , n_embd=37 ) def SCREAMING_SNAKE_CASE ( self ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self ): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : int = OpenAIGPTModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch class _snake_case ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Dict = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" ) model.to(__lowerCAmelCase ) __magic_name__ : Dict = torch.tensor([[481, 4_735, 544]] , dtype=torch.long , device=__lowerCAmelCase ) # the president is __magic_name__ : str = [ 481, 4_735, 544, 246, 963, 870, 762, 239, 244, 40_477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the __magic_name__ : Union[str, Any] = model.generate(__lowerCAmelCase , do_sample=__lowerCAmelCase ) self.assertListEqual(output_ids[0].tolist() , __lowerCAmelCase )
360
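A hand-run sketch of the slow integration test above, assuming network access to the openai-gpt checkpoint; greedy decoding (do_sample=False) should reproduce the "the president is ..." continuation the test hard-codes.

import torch
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
input_ids = tokenizer("the president is", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding
print(tokenizer.decode(output_ids[0]))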
from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class _snake_case : UpperCamelCase__ = LEDConfig UpperCamelCase__ = {} UpperCamelCase__ = 'gelu' def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=False , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a=0.1 , _a=0.1 , _a=20 , _a=2 , _a=1 , _a=0 , _a=4 , ): __magic_name__ : Optional[int] = parent __magic_name__ : Optional[int] = batch_size __magic_name__ : int = seq_length __magic_name__ : Union[str, Any] = is_training __magic_name__ : Tuple = use_labels __magic_name__ : Optional[int] = vocab_size __magic_name__ : Dict = hidden_size __magic_name__ : Union[str, Any] = num_hidden_layers __magic_name__ : int = num_attention_heads __magic_name__ : str = intermediate_size __magic_name__ : Union[str, Any] = hidden_dropout_prob __magic_name__ : List[Any] = attention_probs_dropout_prob __magic_name__ : List[str] = max_position_embeddings __magic_name__ : List[str] = eos_token_id __magic_name__ : Any = pad_token_id __magic_name__ : List[Any] = bos_token_id __magic_name__ : Union[str, Any] = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after __magic_name__ : Optional[int] = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests __magic_name__ : List[str] = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __magic_name__ : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __magic_name__ : Optional[Any] = tf.concat([input_ids, eos_tensor] , axis=1 ) __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : Dict = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) __magic_name__ : Optional[int] = prepare_led_inputs_dict(_a , _a , _a ) __magic_name__ : List[str] = tf.concat( [tf.zeros_like(_a )[:, :-1], tf.ones_like(_a )[:, -1:]] , axis=-1 , ) __magic_name__ : str = global_attention_mask return config, inputs_dict def SCREAMING_SNAKE_CASE ( self , _a , _a ): __magic_name__ : Optional[int] = TFLEDModel(config=_a ).get_decoder() __magic_name__ : Optional[Any] = inputs_dict["input_ids"] __magic_name__ : List[Any] = input_ids[:1, :] __magic_name__ : Tuple = inputs_dict["attention_mask"][:1, :] __magic_name__ : Dict = 1 # first forward pass __magic_name__ : List[Any] = model(_a , attention_mask=_a , use_cache=_a ) __magic_name__ , __magic_name__ : str = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __magic_name__ : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size ) __magic_name__ : Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __magic_name__ : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) __magic_name__ : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __magic_name__ : Any = model(_a , attention_mask=_a )[0] __magic_name__ : Union[str, Any] = model(_a , attention_mask=_a , past_key_values=_a )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __magic_name__ : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __magic_name__ : List[str] = output_from_no_past[:, -3:, random_slice_idx] __magic_name__ : Any = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_a , _a , rtol=1e-3 ) def lowerCAmelCase_ ( _snake_case : int , _snake_case : int , _snake_case : Any , _snake_case : Optional[Any]=None , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=None , _snake_case : Dict=None , ) -> Union[str, Any]: '''simple docstring''' if attention_mask is None: __magic_name__ : Dict = tf.cast(tf.math.not_equal(_snake_case , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __magic_name__ : int = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), 
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __magic_name__ : str = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __magic_name__ : int = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class _snake_case ( snake_case , snake_case , unittest.TestCase ): UpperCamelCase__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () UpperCamelCase__ = (TFLEDForConditionalGeneration,) if is_tf_available() else () UpperCamelCase__ = ( { 'conversational': TFLEDForConditionalGeneration, 'feature-extraction': TFLEDModel, 'summarization': TFLEDForConditionalGeneration, 'text2text-generation': TFLEDForConditionalGeneration, 'translation': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase__ = True UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : str = TFLEDModelTester(self ) __magic_name__ : int = ConfigTester(self , config_class=_a ) def SCREAMING_SNAKE_CASE ( self ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_a ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ , __magic_name__ : Any = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : List[Any] = tf.zeros_like(inputs_dict["attention_mask"] ) __magic_name__ : Optional[int] = 2 __magic_name__ : Tuple = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , ) __magic_name__ : Union[str, Any] = True __magic_name__ : Any = self.model_tester.seq_length __magic_name__ : str = self.model_tester.encoder_seq_length def check_decoder_attentions_output(_a ): __magic_name__ : List[Any] = outputs.decoder_attentions self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(_a ): __magic_name__ : List[Any] = [t.numpy() for t in outputs.encoder_attentions] __magic_name__ : str = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: __magic_name__ : str = True __magic_name__ : List[str] = False __magic_name__ : Any = False __magic_name__ : Union[str, Any] = model_class(_a ) __magic_name__ : List[Any] = model(self._prepare_for_class(_a , _a ) ) __magic_name__ : List[Any] = len(_a ) self.assertEqual(config.output_hidden_states , _a ) check_encoder_attentions_output(_a ) if self.is_encoder_decoder: __magic_name__ : List[Any] = model_class(_a ) __magic_name__ : Optional[int] = 
model(self._prepare_for_class(_a , _a ) ) self.assertEqual(config.output_hidden_states , _a ) check_decoder_attentions_output(_a ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __magic_name__ : Tuple = True __magic_name__ : Dict = model_class(_a ) __magic_name__ : Any = model(self._prepare_for_class(_a , _a ) ) self.assertEqual(config.output_hidden_states , _a ) check_encoder_attentions_output(_a ) # Check attention is always last and order is fine __magic_name__ : Any = True __magic_name__ : Optional[int] = True __magic_name__ : Union[str, Any] = model_class(_a ) __magic_name__ : Union[str, Any] = model(self._prepare_for_class(_a , _a ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_a ) ) self.assertEqual(model.config.output_hidden_states , _a ) check_encoder_attentions_output(_a ) @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." ) def SCREAMING_SNAKE_CASE ( self ): pass def SCREAMING_SNAKE_CASE ( self ): # TODO: Head-masking not yet implement pass def lowerCAmelCase_ ( _snake_case : Union[str, Any] ) -> Any: '''simple docstring''' return tf.constant(_snake_case , dtype=tf.intaa ) snake_case : Tuple = 1E-4 @slow @require_tf class _snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : str = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led # change to intended input here __magic_name__ : Tuple = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) __magic_name__ : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) __magic_name__ : Optional[Any] = prepare_led_inputs_dict(model.config , _a , _a ) __magic_name__ : Union[str, Any] = model(**_a )[0] __magic_name__ : str = (1, 1_024, 768) self.assertEqual(output.shape , _a ) # change to expected output here __magic_name__ : List[str] = tf.convert_to_tensor( [[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , ) tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-3 ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : List[Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ) # change to intended input here __magic_name__ : Optional[Any] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) __magic_name__ : Dict = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) __magic_name__ : Any = prepare_led_inputs_dict(model.config , _a , _a ) __magic_name__ : Tuple = model(**_a )[0] __magic_name__ : Optional[int] = (1, 1_024, model.config.vocab_size) self.assertEqual(output.shape , _a ) # change to expected output here __magic_name__ : List[str] = tf.convert_to_tensor( [[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , ) tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-3 , rtol=1e-3 )
41
0
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
24
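A hypothetical collecting variant (not part of the file above) of the same state-space-tree backtracking: it appends copies instead of printing, so the 3! = 6 orderings can be checked programmatically.

def collect_permutations(sequence):
    results = []

    def dfs(current, used):
        if len(current) == len(sequence):
            results.append(list(current))  # snapshot before backtracking
            return
        for i, item in enumerate(sequence):
            if not used[i]:
                used[i] = True
                current.append(item)
                dfs(current, used)
                current.pop()
                used[i] = False

    dfs([], [False] * len(sequence))
    return results

assert len(collect_permutations([1, 2, 3])) == 6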
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
272
0
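A behavior sketch for attribute_map, assuming the usual PretrainedConfig alias machinery: reads through the legacy names resolve to the renamed fields.

config = ErnieMConfig(num_labels=3, classifier_dropout=0.2)
assert config.num_classes == 3   # alias of num_labels
assert config.dropout == 0.2     # alias of classifier_dropout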
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi  # reduce theta into one period before summing
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
357
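A sanity-check sketch for the truncated series above: after the period reduction, even 15 terms agree with the math module to far better than single precision.

import math

assert abs(maclaurin_sin(1.0) - math.sin(1.0)) < 1e-10
assert abs(maclaurin_cos(10, 15) - math.cos(10)) < 1e-7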
'''simple docstring''' import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class a ( _lowerCamelCase ): snake_case_ = 42 snake_case_ = None def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase=0.9_9_9, __UpperCAmelCase="cosine", ) -> Dict: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(__UpperCAmelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__UpperCAmelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" ) snake_case_ = [] for i in range(__UpperCAmelCase ): snake_case_ = i / num_diffusion_timesteps snake_case_ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ), __UpperCAmelCase ) ) return torch.tensor(__UpperCAmelCase, dtype=torch.floataa ) class a ( _lowerCamelCase , _lowerCamelCase ): @register_to_config def __init__( self : List[str] , lowercase_ : int = 1000 , lowercase_ : str = "fixed_small_log" , lowercase_ : bool = True , lowercase_ : Optional[float] = 1.0 , lowercase_ : str = "epsilon" , lowercase_ : str = "squaredcos_cap_v2" , ): if beta_schedule != "squaredcos_cap_v2": raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' ) snake_case_ = betas_for_alpha_bar(lowercase_ ) snake_case_ = 1.0 - self.betas snake_case_ = torch.cumprod(self.alphas , dim=0 ) snake_case_ = torch.tensor(1.0 ) # standard deviation of the initial noise distribution snake_case_ = 1.0 # setable values snake_case_ = None snake_case_ = torch.from_numpy(np.arange(0 , lowercase_ )[::-1].copy() ) snake_case_ = variance_type def A_ ( self : Optional[Any] , lowercase_ : torch.FloatTensor , lowercase_ : Optional[int] = None ): return sample def A_ ( self : Optional[int] , lowercase_ : int , lowercase_ : Union[str, torch.device] = None ): snake_case_ = num_inference_steps snake_case_ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) snake_case_ = (np.arange(0 , lowercase_ ) * step_ratio).round()[::-1].copy().astype(np.intaa ) snake_case_ = torch.from_numpy(lowercase_ ).to(lowercase_ ) def A_ ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : Optional[int]=None , lowercase_ : Tuple=None , lowercase_ : Tuple=None ): if prev_timestep is None: snake_case_ = t - 1 snake_case_ = self.alphas_cumprod[t] snake_case_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one snake_case_ = 1 - alpha_prod_t snake_case_ = 1 - alpha_prod_t_prev if prev_timestep == t - 1: snake_case_ = self.betas[t] else: snake_case_ = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample snake_case_ = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: snake_case_ = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": snake_case_ = torch.log(torch.clamp(lowercase_ , min=1e-20 ) ) snake_case_ = torch.exp(0.5 * variance ) 
elif variance_type == "learned_range": # NOTE difference with DDPM scheduler snake_case_ = variance.log() snake_case_ = beta.log() snake_case_ = (predicted_variance + 1) / 2 snake_case_ = frac * max_log + (1 - frac) * min_log return variance def A_ ( self : List[Any] , lowercase_ : torch.FloatTensor , lowercase_ : int , lowercase_ : torch.FloatTensor , lowercase_ : Optional[int] = None , lowercase_ : int=None , lowercase_ : bool = True , ): snake_case_ = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": snake_case_ ,snake_case_ = torch.split(lowercase_ , sample.shape[1] , dim=1 ) else: snake_case_ = None # 1. compute alphas, betas if prev_timestep is None: snake_case_ = t - 1 snake_case_ = self.alphas_cumprod[t] snake_case_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one snake_case_ = 1 - alpha_prod_t snake_case_ = 1 - alpha_prod_t_prev if prev_timestep == t - 1: snake_case_ = self.betas[t] snake_case_ = self.alphas[t] else: snake_case_ = 1 - alpha_prod_t / alpha_prod_t_prev snake_case_ = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": snake_case_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": snake_case_ = model_output else: raise ValueError( F"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`" ''' for the UnCLIPScheduler.''' ) # 3. Clip "predicted x_0" if self.config.clip_sample: snake_case_ = torch.clamp( lowercase_ , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf snake_case_ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t snake_case_ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf snake_case_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise snake_case_ = 0 if t > 0: snake_case_ = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=lowercase_ , device=model_output.device ) snake_case_ = self._get_variance( lowercase_ , predicted_variance=lowercase_ , prev_timestep=lowercase_ , ) if self.variance_type == "fixed_small_log": snake_case_ = variance elif self.variance_type == "learned_range": snake_case_ = (0.5 * variance).exp() else: raise ValueError( F"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`" ''' for the UnCLIPScheduler.''' ) snake_case_ = variance * variance_noise snake_case_ = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=lowercase_ , pred_original_sample=lowercase_ ) def A_ ( self : Any , lowercase_ : torch.FloatTensor , lowercase_ : torch.FloatTensor , lowercase_ : torch.IntTensor , ): # Make sure alphas_cumprod and timestep have same device and dtype as original_samples snake_case_ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) snake_case_ = timesteps.to(original_samples.device ) snake_case_ = alphas_cumprod[timesteps] ** 0.5 snake_case_ = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): snake_case_ = sqrt_alpha_prod.unsqueeze(-1 ) snake_case_ = (1 - alphas_cumprod[timesteps]) ** 0.5 snake_case_ = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): snake_case_ = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) snake_case_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
72
0
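A standalone sketch of the forward-noising identity that the scheduler's add_noise method implements at the end of the file above: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps, which keeps x_t unit-variance when x_0 and eps are. The tensor shapes are illustrative only.

import torch

alpha_bar_t = torch.tensor(0.9)
x0 = torch.randn(4, 8)    # clean sample
eps = torch.randn(4, 8)   # Gaussian noise
xt = alpha_bar_t.sqrt() * x0 + (1 - alpha_bar_t).sqrt() * eps
assert xt.shape == x0.shape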
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class A_ ( unittest.TestCase ): def __init__( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Union[str, Any]=7 , snake_case_ : Union[str, Any]=3 , snake_case_ : Tuple=3_0 , snake_case_ : Optional[Any]=4_0_0 , snake_case_ : Any=True , snake_case_ : Optional[Any]=None , snake_case_ : Union[str, Any]=0.9 , snake_case_ : Optional[int]=None , snake_case_ : Optional[Any]=True , snake_case_ : Any=[0.5, 0.5, 0.5] , snake_case_ : str=[0.5, 0.5, 0.5] , ): _UpperCAmelCase = size if size is not None else {"shortest_edge": 3_0} _UpperCAmelCase = crop_size if crop_size is not None else {"height": 3_0, "width": 3_0} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize_and_center_crop _UpperCAmelCase = size _UpperCAmelCase = crop_pct _UpperCAmelCase = crop_size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def lowercase ( self : List[Any] ): return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class A_ ( _a , unittest.TestCase ): _lowerCamelCase : str = PoolFormerImageProcessor if is_vision_available() else None def lowercase ( self : List[Any] ): _UpperCAmelCase = PoolFormerImageProcessingTester(self ) @property def lowercase ( self : int ): return self.image_processor_tester.prepare_image_processor_dict() def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case_ , "do_resize_and_center_crop" ) ) self.assertTrue(hasattr(snake_case_ , "size" ) ) self.assertTrue(hasattr(snake_case_ , "crop_pct" ) ) self.assertTrue(hasattr(snake_case_ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case_ , "image_mean" ) ) self.assertTrue(hasattr(snake_case_ , "image_std" ) ) def lowercase ( self : List[str] ): _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 3_0} ) self.assertEqual(image_processor.crop_size , {"height": 3_0, "width": 3_0} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {"shortest_edge": 4_2} ) self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} ) def lowercase ( self : Tuple ): pass def lowercase ( self : int ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ ) for image in image_inputs: self.assertIsInstance(snake_case_ , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , 
( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched _UpperCAmelCase = image_processing(snake_case_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def lowercase ( self : str ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ ) for image in image_inputs: self.assertIsInstance(snake_case_ , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched _UpperCAmelCase = image_processing(snake_case_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def lowercase ( self : str ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ ) for image in image_inputs: self.assertIsInstance(snake_case_ , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched _UpperCAmelCase = image_processing(snake_case_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
22
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, ClassLabel, Features from .base import TaskTemplate @dataclass(frozen=_a ) class SCREAMING_SNAKE_CASE__ ( _a ): _a = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} ) _a = Features({'audio': Audio()} ) _a = Features({'labels': ClassLabel} ) _a = "audio" _a = "labels" def __lowercase ( self : List[str] , lowerCAmelCase : Optional[int] ): if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , lowerCAmelCase ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) lowerCAmelCase = copy.deepcopy(self ) lowerCAmelCase = self.label_schema.copy() lowerCAmelCase = features[self.label_column] lowerCAmelCase = label_schema return task_template @property def __lowercase ( self : List[str] ): return { self.audio_column: "audio", self.label_column: "labels", }
155
0
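A usage sketch for align_with_features, using the class names as cleaned up above: the generic ClassLabel placeholder in label_schema is swapped for the dataset's own label feature.

from datasets import Audio, ClassLabel, Features

features = Features({"audio": Audio(), "labels": ClassLabel(names=["no", "yes"])})
task = AudioClassification(audio_column="audio", label_column="labels")
aligned = task.align_with_features(features)
assert aligned.label_schema["labels"].names == ["no", "yes"]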
from __future__ import annotations


def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
211
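A usage sketch of the convention above: pass 0 for the unknown quantity in tau = F / A, and the function returns the missing quantity's name and value.

assert shear_stress(stress=25, tangential_force=100, area=0) == ("area", 4.0)
assert shear_stress(stress=0, tangential_force=1600, area=200) == ("stress", 8.0)
assert shear_stress(stress=1000, tangential_force=0, area=1200) == ("tangential_force", 1200000)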
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
211
1
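A quick instantiation sketch: the defaults above encode the XL variant's scale, and the OnnxConfig subclass only declares which axes stay symbolic at export time.

config = XLMRobertaXLConfig()
assert config.hidden_size == 2560 and config.num_hidden_layers == 36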
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class A__ ( unittest.TestCase ): """simple docstring""" def __init__( self , lowercase , lowercase=2 , lowercase=56 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=2 , lowercase=7 , lowercase="gelu_new" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=4 , lowercase="block_sparse" , lowercase=True , lowercase=False , lowercase=2 , lowercase=3 , ) -> Any: '''simple docstring''' a__ : List[str] = parent a__ : Optional[Any] = batch_size a__ : Tuple = seq_length a__ : Optional[Any] = is_training a__ : Optional[Any] = use_attention_mask a__ : Any = use_token_type_ids a__ : Union[str, Any] = use_labels a__ : Union[str, Any] = vocab_size a__ : Dict = hidden_size a__ : Union[str, Any] = num_hidden_layers a__ : Optional[int] = num_attention_heads a__ : Optional[int] = intermediate_size a__ : Dict = hidden_act a__ : Optional[Any] = hidden_dropout_prob a__ : int = attention_probs_dropout_prob a__ : Tuple = max_position_embeddings a__ : Tuple = type_vocab_size a__ : Tuple = type_sequence_label_size a__ : Union[str, Any] = initializer_range a__ : List[Any] = num_choices a__ : List[str] = rescale_embeddings a__ : str = attention_type a__ : Dict = use_bias a__ : Union[str, Any] = block_size a__ : Dict = num_random_blocks def __lowercase ( self) -> Any: '''simple docstring''' a__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a__ : int = None if self.use_attention_mask: a__ : Any = random_attention_mask([self.batch_size, self.seq_length]) a__ : Any = None if self.use_token_type_ids: a__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) a__ : int = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def __lowercase ( self) -> int: '''simple docstring''' a__ : Optional[int] = self.prepare_config_and_inputs() a__ , a__ , a__ , a__ : Optional[Any] = config_and_inputs a__ : Tuple = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask, } return config, inputs_dict @require_flax class A__ ( __UpperCAmelCase , unittest.TestCase ): """simple docstring""" __A : Optional[Any] = ( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, 
FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) __A : List[Any] = False __A : int = False def __lowercase ( self) -> int: '''simple docstring''' a__ : Any = FlaxBigBirdModelTester(self) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __lowercase ( self) -> List[Any]: '''simple docstring''' super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __lowercase ( self) -> Dict: '''simple docstring''' super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __lowercase ( self) -> Dict: '''simple docstring''' super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __lowercase ( self) -> Dict: '''simple docstring''' super().test_hidden_states_output() @slow def __lowercase ( self) -> List[str]: '''simple docstring''' for model_class_name in self.all_model_classes: a__ : Optional[int] = model_class_name.from_pretrained('google/bigbird-roberta-base') self.assertIsNotNone(lowercase) def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __lowercase ( self) -> Optional[Any]: '''simple docstring''' a__ , a__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): a__ : List[str] = self._prepare_for_class(lowercase , lowercase) a__ : Optional[Any] = model_class(lowercase) @jax.jit def model_jitted(lowercase , lowercase=None , **lowercase): return model(input_ids=lowercase , attention_mask=lowercase , **lowercase) with self.subTest('JIT Enabled'): a__ : str = model_jitted(**lowercase).to_tuple() with self.subTest('JIT Disabled'): with jax.disable_jit(): a__ : int = model_jitted(**lowercase).to_tuple() self.assertEqual(len(lowercase) , len(lowercase)) for jitted_output, output in zip(lowercase , lowercase): self.assertEqual(jitted_output.shape , output.shape) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase=1e-5 , lowercase="outputs" , lowercase=None) -> Optional[int]: '''simple docstring''' if name.startswith('outputs.attentions'): return else: super().check_pt_flax_outputs(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase)
99
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Translate legacy negated flags (e.g. no_cuda) to their positive
        # counterparts before the parent dataclass consumes the kwargs.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={getattr(self, positive_arg)}"
                )
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
99
1
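A usage sketch, assuming the upstream BenchmarkArguments dataclass provides the remaining fields: subclass-specific kwargs such as fp16_opt_level are popped before the parent __init__ consumes the rest.

args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], fp16_opt_level="O2")
assert args.fp16_opt_level == "O2"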
"""simple docstring""" def __A ( a_ :list) -> list: if len(a_) <= 1: return lst __a : Tuple = 1 while i < len(a_): if lst[i - 1] <= lst[i]: i += 1 else: __a , __a : Any = lst[i], lst[i - 1] i -= 1 if i == 0: __a : List[str] = 1 return lst if __name__ == "__main__": A = input('''Enter numbers separated by a comma:\n''').strip() A = [int(item) for item in user_input.split(''',''')] print(gnome_sort(unsorted))
188
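A quick check sketch: gnome sort is O(n^2) in the worst case but in-place and simple, and its output should match the built-in sort for any list of comparables.

data = [34, 2, 10, -9]
assert gnome_sort(list(data)) == sorted(data)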
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class __lowercase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = '''openai/whisper-base''' __lowerCAmelCase = ( '''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the ''' '''transcribed text.''' ) __lowerCAmelCase = '''transcriber''' __lowerCAmelCase = WhisperProcessor __lowerCAmelCase = WhisperForConditionalGeneration __lowerCAmelCase = ['''audio'''] __lowerCAmelCase = ['''text'''] def _lowerCamelCase ( self , _UpperCAmelCase ): return self.pre_processor(_UpperCAmelCase , return_tensors='''pt''' ).input_features def _lowerCamelCase ( self , _UpperCAmelCase ): return self.model.generate(inputs=_UpperCAmelCase ) def _lowerCamelCase ( self , _UpperCAmelCase ): return self.pre_processor.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )[0]
188
1
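A usage sketch, assuming a transformers install from the agents/tools era with network access: PipelineTool chains encode -> forward -> decode, so calling the tool on a waveform returns the transcription string directly.

from datasets import load_dataset

ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
tool = SpeechToTextTool()          # loads openai/whisper-base on first use
text = tool(ds[0]["audio"]["array"])
print(text)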
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
209
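A behavior sketch for the _LazyModule swap above: importing the package is cheap because the heavy torch-backed submodules are only imported when one of their attributes is first accessed.

import importlib

layoutlmv2 = importlib.import_module("transformers.models.layoutlmv2")
config_cls = layoutlmv2.LayoutLMv2Config  # triggers the real submodule import lazily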
import itertools
import random
import unittest

import numpy as np

from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_speech_available():
    from transformers import Speech2TextFeatureExtractor

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
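# Minimal numpy sketch of the invariant the tests above assert: after
# utterance-level mean/variance normalization (CMVN), each feature dimension
# has approximately zero mean and unit variance. Standalone, illustrative only.
import numpy as np

feats = np.random.randn(100, 24) * 3.0 + 5.0  # fake (frames, mel bins) features
normed = (feats - feats.mean(axis=0)) / (feats.std(axis=0) + 1e-10)
assert np.all(np.abs(normed.mean(axis=0)) < 1e-3)
assert np.all(np.abs(normed.var(axis=0) - 1) < 1e-3)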
from typing import Optional

import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
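# Sketch of the length-normalized beam scoring used in generate_beam above:
# cumulative log-probs are divided by per-beam sequence lengths before picking
# the top candidates, so short finished hypotheses are not unfairly favored.
# Toy numbers only; not tied to any checkpoint.
import torch

scores_sum = torch.tensor([[-2.0, -3.5], [-4.0, -4.2]])  # (beams, vocab) cumulative log-probs
seq_lengths = torch.tensor([2.0, 4.0])
avg = scores_sum / seq_lengths[:, None]  # per-token average score
best = avg.view(-1).topk(2)
print(best.values, best.indices)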
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}


class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
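# Usage sketch: instantiating with the defaults declared above, and reading the
# generic names mapped through attribute_map (a standard PretrainedConfig
# feature). Illustrative only.
config = XGLMConfig()
assert config.d_model == 1024 and config.num_layers == 24
assert config.hidden_size == config.d_model  # resolved via attribute_map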
"""simple docstring""" import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCamelCase (unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : int ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-canny""" , from_pt=__UpperCAmelCase , dtype=jnp.bfloataa ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=__UpperCAmelCase , from_pt=__UpperCAmelCase , dtype=jnp.bfloataa ) SCREAMING_SNAKE_CASE__ = controlnet_params SCREAMING_SNAKE_CASE__ = """bird""" SCREAMING_SNAKE_CASE__ = jax.device_count() SCREAMING_SNAKE_CASE__ = pipe.prepare_text_inputs([prompts] * num_samples ) SCREAMING_SNAKE_CASE__ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ) SCREAMING_SNAKE_CASE__ = pipe.prepare_image_inputs([canny_image] * num_samples ) SCREAMING_SNAKE_CASE__ = jax.random.PRNGKey(0 ) SCREAMING_SNAKE_CASE__ = jax.random.split(__UpperCAmelCase , jax.device_count() ) SCREAMING_SNAKE_CASE__ = replicate(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = shard(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = shard(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = pipe( prompt_ids=__UpperCAmelCase , image=__UpperCAmelCase , params=__UpperCAmelCase , prng_seed=__UpperCAmelCase , num_inference_steps=5_0 , jit=__UpperCAmelCase , ).images assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3) SCREAMING_SNAKE_CASE__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) SCREAMING_SNAKE_CASE__ = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] SCREAMING_SNAKE_CASE__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) SCREAMING_SNAKE_CASE__ = jnp.array( [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] ) print(F"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-openpose""" , from_pt=__UpperCAmelCase , dtype=jnp.bfloataa ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=__UpperCAmelCase , from_pt=__UpperCAmelCase , dtype=jnp.bfloataa ) SCREAMING_SNAKE_CASE__ = controlnet_params SCREAMING_SNAKE_CASE__ = """Chef in the kitchen""" SCREAMING_SNAKE_CASE__ = jax.device_count() SCREAMING_SNAKE_CASE__ = pipe.prepare_text_inputs([prompts] * num_samples ) SCREAMING_SNAKE_CASE__ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" ) SCREAMING_SNAKE_CASE__ = pipe.prepare_image_inputs([pose_image] * num_samples ) SCREAMING_SNAKE_CASE__ = jax.random.PRNGKey(0 ) SCREAMING_SNAKE_CASE__ = jax.random.split(__UpperCAmelCase , jax.device_count() ) SCREAMING_SNAKE_CASE__ = 
replicate(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = shard(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = shard(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = pipe( prompt_ids=__UpperCAmelCase , image=__UpperCAmelCase , params=__UpperCAmelCase , prng_seed=__UpperCAmelCase , num_inference_steps=5_0 , jit=__UpperCAmelCase , ).images assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3) SCREAMING_SNAKE_CASE__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) SCREAMING_SNAKE_CASE__ = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] SCREAMING_SNAKE_CASE__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) SCREAMING_SNAKE_CASE__ = jnp.array( [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] ) print(F"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
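# Minimal sketch of the replicate/shard pattern these tests rely on: parameters
# are copied to every device, while per-sample inputs get a leading device axis.
# Assumes a flax/jax install; batch size must be divisible by the device count.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = {"w": jnp.ones((2, 2))}
inputs = jnp.ones((jax.device_count(), 3))  # one example per device
replicated = replicate(params)  # adds a device axis to every leaf
sharded = shard(inputs)         # splits the batch across devices
print(jax.tree_util.tree_map(lambda x: x.shape, replicated), sharded.shape)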
"""simple docstring""" def A ( snake_case__ = 50 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(F'{solution() = }')
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]

if TYPE_CHECKING:
    from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
    from .tokenization_lxmert import LxmertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_lxmert_fast import LxmertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lxmert import (
            LxmertEncoder,
            LxmertForPreTraining,
            LxmertForQuestionAnswering,
            LxmertModel,
            LxmertPreTrainedModel,
            LxmertVisualFeatureEncoder,
            LxmertXLayer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_lxmert import (
            TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLxmertForPreTraining,
            TFLxmertMainLayer,
            TFLxmertModel,
            TFLxmertPreTrainedModel,
            TFLxmertVisualFeatureEncoder,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
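# The same deprecation-shim pattern in isolation: subclass the replacement and
# emit a FutureWarning from __init__. Class names here are hypothetical.
import warnings


class NewProcessor:
    def __init__(self, size=384):
        self.size = size


class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)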
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = SwinConfig(image_size=192 ) if "base" in model_name: __lowerCAmelCase = 6 __lowerCAmelCase = 128 __lowerCAmelCase = (2, 2, 18, 2) __lowerCAmelCase = (4, 8, 16, 32) elif "large" in model_name: __lowerCAmelCase = 12 __lowerCAmelCase = 192 __lowerCAmelCase = (2, 2, 18, 2) __lowerCAmelCase = (6, 12, 24, 48) else: raise ValueError("Model not supported, only supports base and large variants" ) __lowerCAmelCase = window_size __lowerCAmelCase = embed_dim __lowerCAmelCase = depths __lowerCAmelCase = num_heads return config def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' if "encoder.mask_token" in name: __lowerCAmelCase = name.replace("encoder.mask_token" , "embeddings.mask_token" ) if "encoder.patch_embed.proj" in name: __lowerCAmelCase = name.replace("encoder.patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "encoder.patch_embed.norm" in name: __lowerCAmelCase = name.replace("encoder.patch_embed.norm" , "embeddings.norm" ) if "attn.proj" in name: __lowerCAmelCase = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: __lowerCAmelCase = name.replace("attn" , "attention.self" ) if "norm1" in name: __lowerCAmelCase = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: __lowerCAmelCase = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: __lowerCAmelCase = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: __lowerCAmelCase = name.replace("mlp.fc2" , "output.dense" ) if name == "encoder.norm.weight": __lowerCAmelCase = "layernorm.weight" if name == "encoder.norm.bias": __lowerCAmelCase = "layernorm.bias" if "decoder" in name: pass else: __lowerCAmelCase = "swin." + name return name def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' for key in orig_state_dict.copy().keys(): __lowerCAmelCase = orig_state_dict.pop(_UpperCamelCase ) if "attn_mask" in key: pass elif "qkv" in key: __lowerCAmelCase = key.split("." 
) __lowerCAmelCase = int(key_split[2] ) __lowerCAmelCase = int(key_split[4] ) __lowerCAmelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[ dim : dim * 2, : ] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = val[ :dim ] __lowerCAmelCase = val[ dim : dim * 2 ] __lowerCAmelCase = val[ -dim: ] else: __lowerCAmelCase = val return orig_state_dict def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = torch.load(_UpperCamelCase , map_location="cpu" )["model"] __lowerCAmelCase = get_swin_config(_UpperCamelCase ) __lowerCAmelCase = SwinForMaskedImageModeling(_UpperCamelCase ) model.eval() __lowerCAmelCase = convert_state_dict(_UpperCamelCase , _UpperCamelCase ) model.load_state_dict(_UpperCamelCase ) __lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowerCAmelCase = ViTImageProcessor(size={"height": 192, "width": 192} ) __lowerCAmelCase = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) __lowerCAmelCase = image_processor(images=_UpperCamelCase , return_tensors="pt" ) with torch.no_grad(): __lowerCAmelCase = model(**_UpperCamelCase ).logits print(outputs.keys() ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_UpperCamelCase ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_UpperCamelCase ) if push_to_hub: print(f"Pushing model and image processor for {model_name} to hub" ) model.push_to_hub(f"microsoft/{model_name}" ) image_processor.push_to_hub(f"microsoft/{model_name}" ) if __name__ == "__main__": A : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="swin-base-simmim-window6-192", type=str, choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"], help="Name of the Swin SimMIM model you'd like to convert.", ) parser.add_argument( "--checkpoint_path", default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth", type=str, help="Path to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) A : Optional[int] = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
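# Minimal numpy sketch of the fused-QKV split performed above: the original
# checkpoint stores one (3*dim, dim) matrix, while the HF model expects
# separate query/key/value weights of shape (dim, dim) each. Standalone demo.
import numpy as np

dim = 4
qkv = np.arange(3 * dim * dim, dtype=np.float32).reshape(3 * dim, dim)
q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)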
"""simple docstring""" import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__( self , __a , __a=None , __a=True , __a=None , **__a ): __lowerCAmelCase = parent __lowerCAmelCase = config_class __lowerCAmelCase = has_text_modality __lowerCAmelCase = kwargs __lowerCAmelCase = common_properties def snake_case ( self ): __lowerCAmelCase = self.config_class(**self.inputs_dict ) __lowerCAmelCase = ( ["hidden_size", "num_attention_heads", "num_hidden_layers"] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(["vocab_size"] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(__a , __a ) , msg=f"`{prop}` does not exist" ) # Test that config has the common properties as setter for idx, name in enumerate(__a ): try: setattr(__a , __a , __a ) self.parent.assertEqual( getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) for idx, name in enumerate(__a ): try: __lowerCAmelCase = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def snake_case ( self ): __lowerCAmelCase = self.config_class(**self.inputs_dict ) __lowerCAmelCase = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , __a ) def snake_case ( self ): __lowerCAmelCase = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase = os.path.join(__a , "config.json" ) config_first.to_json_file(__a ) __lowerCAmelCase = self.config_class.from_json_file(__a ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def snake_case ( self ): __lowerCAmelCase = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(__a ) __lowerCAmelCase = self.config_class.from_pretrained(__a ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def snake_case ( self ): __lowerCAmelCase = self.config_class(**self.inputs_dict ) __lowerCAmelCase = "test" with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase = os.path.join(__a , __a ) config_first.save_pretrained(__a ) __lowerCAmelCase = self.config_class.from_pretrained(__a , subfolder=__a ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def snake_case ( self ): __lowerCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) __lowerCAmelCase = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def snake_case ( self ): if self.config_class.is_composition: return __lowerCAmelCase = self.config_class() self.parent.assertIsNotNone(__a ) def snake_case ( 
self ): __lowerCAmelCase = copy.deepcopy(__a ) __lowerCAmelCase = self.config_class(**__a ) __lowerCAmelCase = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) ) elif getattr(__a , __a ) != value: wrong_values.append((key, getattr(__a , __a ), value) ) if len(__a ) > 0: __lowerCAmelCase = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] ) raise ValueError(f"The following keys were not properly set in the config:\n{errors}" ) def snake_case ( self ): self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
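# Usage sketch: inside a model's unittest.TestCase the tester is typically
# wired up like this. BertConfig is used purely as a familiar example; the
# kwargs become the inputs_dict used to build a tiny config.
import unittest

from transformers import BertConfig


class BertConfigTest(unittest.TestCase):
    def setUp(self):
        self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()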
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot be matched twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
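# Sanity checks (illustrative): identical strings score exactly 1.0, and the
# similarity always stays within [0, 1].
assert jaro_winkler("hello", "hello") == 1.0
assert 0.0 <= jaro_winkler("hello", "world") <= 1.0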
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Gives the euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classifies the point using the KNN algorithm."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
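# Quick check of the distance helper on a 3-4-5 right triangle.
assert euclidean_distance([0, 0], [3, 4]) == 5.0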
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return a list of the Catalan numbers from 0 through `upper_limit`."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
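# The first few Catalan numbers, for a quick sanity check of the recurrence.
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]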
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Calculate the resonant frequency of an LC circuit: f = 1 / (2*pi*sqrt(L*C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
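# Worked example: with L = 1 H and C = 1 F the resonant frequency is
# 1 / (2*pi) ≈ 0.159 Hz.
label, frequency = resonant_frequency(inductance=1, capacitance=1)
print(label, round(frequency, 3))  # Resonant frequency 0.159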
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np

import datasets

from .execute import check_correctness


_CITATION = """\
@misc{chen2021evaluating,
      title={Evaluating Large Language Models Trained on Code},
      author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
      year={2021},
      eprint={2107.03374},
      archivePrefix={arXiv},
      primaryClass={cs.LG}
}
"""

_DESCRIPTION = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
"""

_KWARGS_DESCRIPTION = """
Calculates how good are predictions given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidates should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the canidate programs (Default: 4).
    timeout:
Returns:
    pass_at_k: dict with pass rates for each k
    results: dict with granular results of each unittest
Examples:
    >>> code_eval = datasets.load_metric("code_eval")
    >>> test_cases = ["assert add(2,3)==5"]
    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
    >>> print(pass_at_k)
    {'pass@1': 0.5, 'pass@2': 1.0}
"""

_WARNING = """
################################################################################
                                  !!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).

Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:

>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"

################################################################################\
"""

_LICENSE = """The MIT License

Copyright (c) OpenAI (https://openai.com)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores."""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
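# The pass@k estimator in isolation: with n=2 samples of which c=1 passes,
# pass@1 = 1 - C(n-c, 1)/C(n, 1) = 0.5. Standalone numpy arithmetic only.
import numpy as np

n, c, k = 2, 1, 1
estimate = 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
assert estimate == 0.5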
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from a T5X-Flax checkpoint into an ordered dict of numpy arrays."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[
                    f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"
                ] = t5x_relpos_bias_lookup(old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables,
        num_layers=config.num_layers,
        is_encoder_only=is_encoder_only,
        scalable_attention=scalable_attention,
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path,
    config_file,
    pytorch_dump_path,
    is_encoder_only: bool = False,
    scalable_attention: bool = False,
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
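# The core layout change in isolation: T5X stores dense kernels as
# (in_features, out_features), while PyTorch nn.Linear weights are the
# transpose. Standalone sketch with dummy shapes.
import numpy as np
import torch

jax_kernel = np.zeros((512, 2048), dtype=np.float32)  # (in, out)
pt_weight = torch.from_numpy(jax_kernel.copy()).T     # (out, in)
assert tuple(pt_weight.shape) == (2048, 512)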
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation number."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
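# The same BeautifulSoup selection on static HTML, so it can be exercised
# without hitting Google Scholar (whose gs_ri/gs_fl class names may change
# over time). Illustrative markup only.
from bs4 import BeautifulSoup

html = '<div class="gs_ri"><div class="gs_fl"><a>a</a><a>b</a><a>Cited by 42</a></div></div>'
soup = BeautifulSoup(html, "html.parser")
anchors = soup.find("div", attrs={"class": "gs_ri"}).find("div", attrs={"class": "gs_fl"}).find_all("a")
print(anchors[2].get_text())  # Cited by 42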
133
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowercase_ : Optional[int] = logging.get_logger(__name__) lowercase_ : Dict = { 'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json', 'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json', 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json', 'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json', 'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json', 'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json', 'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json', 'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json', 'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json', 'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json', 'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json', 'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json', } class __lowerCAmelCase ( UpperCAmelCase__ ): snake_case_ : Tuple = "codegen" snake_case_ : Optional[Any] = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Tuple , snake_case__ : Any=50_400 , snake_case__ : int=2_048 , snake_case__ : Optional[Any]=2_048 , snake_case__ : Tuple=4_096 , snake_case__ : List[str]=28 , snake_case__ : List[Any]=16 , snake_case__ : int=64 , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]="gelu_new" , snake_case__ : List[Any]=0.0 , snake_case__ : List[str]=0.0 , snake_case__ : Optional[int]=0.0 , snake_case__ : Dict=1e-5 , snake_case__ : int=0.02 , snake_case__ : Union[str, Any]=True , snake_case__ : str=50_256 , snake_case__ : List[str]=50_256 , snake_case__ : Optional[int]=False , **snake_case__ : str , ): """simple docstring""" _UpperCAmelCase = vocab_size _UpperCAmelCase = n_ctx _UpperCAmelCase = n_positions _UpperCAmelCase = n_embd _UpperCAmelCase = n_layer _UpperCAmelCase = n_head _UpperCAmelCase = n_inner _UpperCAmelCase = rotary_dim _UpperCAmelCase = activation_function _UpperCAmelCase = resid_pdrop _UpperCAmelCase = embd_pdrop _UpperCAmelCase = attn_pdrop _UpperCAmelCase = layer_norm_epsilon _UpperCAmelCase = initializer_range _UpperCAmelCase = use_cache _UpperCAmelCase = bos_token_id _UpperCAmelCase = eos_token_id super().__init__( bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ ) class __lowerCAmelCase ( UpperCAmelCase__ ): def __init__( self : List[str] , snake_case__ : PretrainedConfig , snake_case__ : str = "default" , snake_case__ : List[PatchingSpec] = None , snake_case__ : bool = False , ): """simple docstring""" super().__init__(snake_case__ , task=snake_case__ , patching_specs=snake_case__ , use_past=snake_case__ ) if not 
getattr(self._config , "pad_token_id" , snake_case__ ): # TODO: how to do that better? _UpperCAmelCase = 0 @property def UpperCamelCase ( self : Tuple ): """simple docstring""" _UpperCAmelCase = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(snake_case__ , direction="inputs" ) _UpperCAmelCase = {0: "batch", 1: "past_sequence + sequence"} else: _UpperCAmelCase = {0: "batch", 1: "sequence"} return common_inputs @property def UpperCamelCase ( self : int ): """simple docstring""" return self._config.n_layer @property def UpperCamelCase ( self : List[str] ): """simple docstring""" return self._config.n_head def UpperCamelCase ( self : List[Any] , snake_case__ : PreTrainedTokenizer , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional[TensorType] = None , ): """simple docstring""" _UpperCAmelCase = super(snake_case__ , self ).generate_dummy_inputs( snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ ) # We need to order the input in the way they appears in the forward() _UpperCAmelCase = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch _UpperCAmelCase , _UpperCAmelCase = common_inputs["input_ids"].shape # Not using the same length for past_key_values _UpperCAmelCase = seqlen + 2 _UpperCAmelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _UpperCAmelCase = [ (torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(self.num_layers ) ] _UpperCAmelCase = common_inputs["attention_mask"] if self.use_past: _UpperCAmelCase = ordered_inputs["attention_mask"].dtype _UpperCAmelCase = torch.cat( [ordered_inputs["attention_mask"], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 ) return ordered_inputs @property def UpperCamelCase ( self : Any ): """simple docstring""" return 13
133
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() _a = logging.get_logger(__name__) def __a ( __lowerCamelCase, __lowerCamelCase=False ) -> int: UpperCAmelCase_ : Optional[int] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" UpperCAmelCase_ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ] ) return rename_keys def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ) -> str: for i in range(config.num_hidden_layers ): if base_model: UpperCAmelCase_ : int = "" else: UpperCAmelCase_ : Union[str, Any] = "deit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase_ : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) UpperCAmelCase_ : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] UpperCAmelCase_ : Any = in_proj_bias[: config.hidden_size] UpperCAmelCase_ : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase_ : Dict = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase_ : List[Any] = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase_ : str = in_proj_bias[-config.hidden_size :] def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) -> List[Any]: UpperCAmelCase_ : Tuple = dct.pop(__lowerCamelCase ) UpperCAmelCase_ : Tuple = val def __a ( ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : str = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw ) return im @torch.no_grad() def __a ( __lowerCamelCase, __lowerCamelCase ) -> List[str]: UpperCAmelCase_ : List[str] = DeiTConfig() # all deit models have fine-tuned heads UpperCAmelCase_ : Optional[int] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size UpperCAmelCase_ : Tuple = 1000 UpperCAmelCase_ : str = "huggingface/label-files" UpperCAmelCase_ : str = "imagenet-1k-id2label.json" UpperCAmelCase_ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ), "r" ) ) UpperCAmelCase_ : List[str] = {int(__lowerCamelCase ): v for k, v in idalabel.items()} UpperCAmelCase_ : Any = idalabel UpperCAmelCase_ : int = {v: k for k, v in idalabel.items()} UpperCAmelCase_ : Any = int(deit_name[-6:-4] ) UpperCAmelCase_ : Dict = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("tiny" ): UpperCAmelCase_ : Any = 192 UpperCAmelCase_ : Union[str, Any] = 768 UpperCAmelCase_ : Union[str, Any] = 12 UpperCAmelCase_ : int = 3 elif deit_name[9:].startswith("small" ): UpperCAmelCase_ : List[str] = 384 UpperCAmelCase_ : List[str] = 1536 UpperCAmelCase_ : Dict = 12 UpperCAmelCase_ : Any = 6 if deit_name[9:].startswith("base" ): pass elif deit_name[4:].startswith("large" ): UpperCAmelCase_ : int = 1024 UpperCAmelCase_ : List[Any] = 4096 UpperCAmelCase_ : Optional[int] = 24 UpperCAmelCase_ : int = 16 # load original model from timm UpperCAmelCase_ : Union[str, Any] = timm.create_model(__lowerCamelCase, pretrained=__lowerCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys UpperCAmelCase_ : Optional[Any] = timm_model.state_dict() UpperCAmelCase_ : Tuple = create_rename_keys(__lowerCamelCase, __lowerCamelCase ) for src, dest in rename_keys: rename_key(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) read_in_q_k_v(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # load HuggingFace model UpperCAmelCase_ : str = DeiTForImageClassificationWithTeacher(__lowerCamelCase ).eval() model.load_state_dict(__lowerCamelCase ) # Check outputs on an image, prepared by DeiTImageProcessor UpperCAmelCase_ : Union[str, Any] = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 UpperCAmelCase_ : Optional[Any] = DeiTImageProcessor(size=__lowerCamelCase, crop_size=config.image_size ) UpperCAmelCase_ : Any = image_processor(images=prepare_img(), return_tensors="pt" ) UpperCAmelCase_ : int = encoding["pixel_values"] UpperCAmelCase_ : Optional[Any] = model(__lowerCamelCase ) UpperCAmelCase_ : Any = timm_model(__lowerCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 ) Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowerCamelCase ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--deit_name', default='vit_deit_base_distilled_patch16_224', type=str, help='Name of the DeiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) _a = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
369
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow _a = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ): """simple docstring""" UpperCAmelCase_ : List[str] = [file for file in os.listdir(lowercase_ ) if os.path.isfile(os.path.join(lowercase_ , lowercase_ ) )] if identifier is not None: UpperCAmelCase_ : Dict = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(lowercase_ , lowercase_ ): for n_ in n_identifier: UpperCAmelCase_ : str = [file for file in files if n_ not in file] else: UpperCAmelCase_ : Any = [file for file in files if n_identifier not in file] UpperCAmelCase_ : Union[str, Any] = ignore_files or [] ignore_files.append("__init__.py" ) UpperCAmelCase_ : Optional[int] = [file for file in files if file not in ignore_files] for file in files: # Open all files print("Testing" , lowercase_ ) if only_modules: UpperCAmelCase_ : str = file.split("." )[0] try: UpperCAmelCase_ : str = getattr(lowercase_ , lowercase_ ) UpperCAmelCase_ : Tuple = doctest.DocTestSuite(lowercase_ ) UpperCAmelCase_ : int = unittest.TextTestRunner().run(lowercase_ ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(F"""{module_identifier} is not a module.""" ) else: UpperCAmelCase_ : Optional[Any] = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = Path("src/transformers" ) UpperCAmelCase_ : str = "modeling" UpperCAmelCase_ : Optional[Any] = [ "modeling_ctrl.py", "modeling_tf_ctrl.py", ] self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = Path("src/transformers" ) UpperCAmelCase_ : Any = "tokenization" self.analyze_directory(lowercase_ , identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = Path("src/transformers" ) UpperCAmelCase_ : List[Any] = "configuration" self.analyze_directory(lowercase_ , identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = Path("src/transformers" ) UpperCAmelCase_ : List[Any] = ["configuration", "modeling", "tokenization"] self.analyze_directory(lowercase_ , n_identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = Path("docs/source" ) UpperCAmelCase_ : Union[str, Any] = ["favicon.ico"] self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_ )
23
0
import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : Dict = '''T5Config''' def SCREAMING_SNAKE_CASE_ ( __magic_name__ : jnp.array , __magic_name__ : int , __magic_name__ : int ) -> jnp.ndarray: """simple docstring""" UpperCamelCase :List[str] = jnp.zeros_like(__magic_name__ ) UpperCamelCase :str = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) UpperCamelCase :Tuple = shifted_input_ids.at[:, 0].set(__magic_name__ ) UpperCamelCase :int = jnp.where(shifted_input_ids == -100 , __magic_name__ , __magic_name__ ) return shifted_input_ids class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : List[Any] = """mt5""" snake_case__ : Optional[Any] = MTaConfig class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : Optional[int] = """mt5""" snake_case__ : Any = MTaConfig class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : Optional[Any] = """mt5""" snake_case__ : List[Any] = MTaConfig
38
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : str = { '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''', } class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : Optional[int] = """layoutlmv3""" def __init__( self : List[Any] , __lowerCamelCase : Optional[Any]=50_265 , __lowerCamelCase : Dict=768 , __lowerCamelCase : Any=12 , __lowerCamelCase : int=12 , __lowerCamelCase : str=3_072 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[Any]=512 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : Union[str, Any]=1E-5 , __lowerCamelCase : Any=1 , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Dict=1_024 , __lowerCamelCase : List[Any]=128 , __lowerCamelCase : str=128 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : str=32 , __lowerCamelCase : List[Any]=128 , __lowerCamelCase : str=64 , __lowerCamelCase : List[str]=256 , __lowerCamelCase : Dict=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Tuple=224 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Dict=16 , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Optional[Any] , ): super().__init__( vocab_size=__lowerCamelCase , hidden_size=__lowerCamelCase , num_hidden_layers=__lowerCamelCase , num_attention_heads=__lowerCamelCase , intermediate_size=__lowerCamelCase , hidden_act=__lowerCamelCase , hidden_dropout_prob=__lowerCamelCase , attention_probs_dropout_prob=__lowerCamelCase , max_position_embeddings=__lowerCamelCase , type_vocab_size=__lowerCamelCase , initializer_range=__lowerCamelCase , layer_norm_eps=__lowerCamelCase , pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase , ) UpperCamelCase :int = max_ad_position_embeddings UpperCamelCase :Tuple = coordinate_size UpperCamelCase :List[Any] = shape_size UpperCamelCase :Union[str, Any] = has_relative_attention_bias UpperCamelCase :Any = rel_pos_bins UpperCamelCase :Optional[Any] = max_rel_pos UpperCamelCase :str = has_spatial_attention_bias UpperCamelCase :Tuple = rel_ad_pos_bins UpperCamelCase :Optional[int] = max_rel_ad_pos UpperCamelCase :Tuple = text_embed UpperCamelCase :str = visual_embed UpperCamelCase :Optional[Any] = input_size UpperCamelCase :str = num_channels UpperCamelCase :List[Any] = patch_size UpperCamelCase :Optional[Any] = classifier_dropout class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : int = version.parse("""1.12""" ) @property def _A ( self : Optional[int] ): # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) 
else: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def _A ( self : str ): return 1E-5 @property def _A ( self : Dict ): return 12 def _A ( self : Dict , __lowerCamelCase : "ProcessorMixin" , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional["TensorType"] = None , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 40 , __lowerCamelCase : int = 40 , ): setattr(processor.image_processor , """apply_ocr""" , __lowerCamelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCamelCase :Optional[Any] = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCamelCase :Optional[int] = processor.tokenizer.num_special_tokens_to_add(__lowerCamelCase ) UpperCamelCase :int = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence UpperCamelCase :Any = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes UpperCamelCase :Optional[Any] = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) UpperCamelCase :List[str] = self._generate_dummy_images(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) UpperCamelCase :Any = dict( processor( __lowerCamelCase , text=__lowerCamelCase , boxes=__lowerCamelCase , return_tensors=__lowerCamelCase , ) ) return inputs
38
1
import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ): """simple docstring""" A = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg" A = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert("RGB" ) A = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ), ] ) A = transform(lowercase__ ).unsqueeze(0 ).to(lowercase__ ) return image def __SCREAMING_SNAKE_CASE ( lowercase__ ): """simple docstring""" if "visual_encoder" in key: A = re.sub("visual_encoder*" , "vision_model.encoder" , lowercase__ ) if "blocks" in key: A = re.sub(r"blocks" , "layers" , lowercase__ ) if "attn" in key: A = re.sub(r"attn" , "self_attn" , lowercase__ ) if "norm1" in key: A = re.sub(r"norm1" , "layer_norm1" , lowercase__ ) if "norm2" in key: A = re.sub(r"norm2" , "layer_norm2" , lowercase__ ) if "encoder.norm" in key: A = re.sub(r"encoder.norm" , "post_layernorm" , lowercase__ ) if "encoder.patch_embed.proj" in key: A = re.sub(r"encoder.patch_embed.proj" , "embeddings.patch_embedding" , lowercase__ ) if "encoder.pos_embed" in key: A = re.sub(r"encoder.pos_embed" , "embeddings.position_embedding" , lowercase__ ) if "encoder.cls_token" in key: A = re.sub(r"encoder.cls_token" , "embeddings.class_embedding" , lowercase__ ) if "self_attn" in key: A = re.sub(r"self_attn.proj" , "self_attn.projection" , lowercase__ ) return key @torch.no_grad() def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__=None ): """simple docstring""" if config_path is not None: A = BlipConfig.from_pretrained(lowercase__ ) else: A = BlipConfig(projection_dim=512 , text_config={} , vision_config={} ) A = BlipForConditionalGeneration(lowercase__ ).eval() A = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth" A = blip_decoder(pretrained=lowercase__ , image_size=384 , vit="base" ) A = pt_model.eval() A = pt_model.state_dict() for key in modified_state_dict.copy(): A = modified_state_dict.pop(lowercase__ ) A = rename_key(lowercase__ ) A = value hf_model.load_state_dict(lowercase__ ) A = 384 A = load_demo_image(image_size=lowercase__ , device="cpu" ) A = BertTokenizer.from_pretrained("bert-base-uncased" ) A = tokenizer(["a picture of"] ).input_ids A = hf_model.generate(lowercase__ , lowercase__ ) assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102] A = hf_model.generate(lowercase__ ) assert out[0].tolist() == [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(lowercase__ ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' A = ( "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth" ) A = blip_vqa(pretrained=lowercase__ , 
image_size=lowercase__ , vit="base" ) vqa_model.eval() A = vqa_model.state_dict() for key in modified_state_dict.copy(): A = modified_state_dict.pop(lowercase__ ) A = rename_key(lowercase__ ) A = value A = BlipForQuestionAnswering(lowercase__ ) hf_vqa_model.load_state_dict(lowercase__ ) A = ["How many dogs are in this image?"] A = tokenizer(lowercase__ , return_tensors="pt" ).input_ids A = hf_vqa_model.generate(lowercase__ , lowercase__ ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" ) A = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth" A = blip_itm(pretrained=lowercase__ , image_size=lowercase__ , vit="base" ) itm_model.eval() A = itm_model.state_dict() for key in modified_state_dict.copy(): A = modified_state_dict.pop(lowercase__ ) A = rename_key(lowercase__ ) A = value A = BlipForImageTextRetrieval(lowercase__ ) A = ["A picture of a woman with a dog sitting in a beach"] A = tokenizer( lowercase__ , return_tensors="pt" , padding="max_length" , truncation=lowercase__ , max_length=35 , ).input_ids hf_itm_model.load_state_dict(lowercase__ ) hf_itm_model.eval() A = hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ ) A = hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ ) assert out[0].item() == 0.21_10_68_74_94_27_79_54 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" ) if __name__ == "__main__": __A : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') __A : Optional[Any] = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
356
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
57
0
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) snake_case : Optional[Any] = { '''sample_size''': 32, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': 10_00, '''block_out_channels''': [32, 64], '''attention_head_dim''': 8, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } snake_case : List[str] = { '''sample_size''': 64, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 3, '''num_class_embeds''': 10_00, '''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4], '''attention_head_dim''': 64, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } snake_case : Optional[Any] = { '''sample_size''': 2_56, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': None, '''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4], '''attention_head_dim''': 64, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''default''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } snake_case : List[str] = { '''num_train_timesteps''': 40, '''sigma_min''': 0.0_02, '''sigma_max''': 80.0, } snake_case : int = { '''num_train_timesteps''': 2_01, '''sigma_min''': 0.0_02, '''sigma_max''': 80.0, } snake_case : Any = { '''num_train_timesteps''': 1_51, '''sigma_min''': 0.0_02, '''sigma_max''': 80.0, } def __lowerCamelCase ( UpperCAmelCase_ : str ): """simple docstring""" if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('''boolean value expected''' ) def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]=False ): """simple docstring""" a :Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.0.weight'''] a :str = checkpoint[F'''{old_prefix}.in_layers.0.bias'''] a :str = checkpoint[F'''{old_prefix}.in_layers.2.weight'''] a :List[str] = checkpoint[F'''{old_prefix}.in_layers.2.bias'''] a :str = checkpoint[F'''{old_prefix}.emb_layers.1.weight'''] a :str = checkpoint[F'''{old_prefix}.emb_layers.1.bias'''] a :Optional[int] = checkpoint[F'''{old_prefix}.out_layers.0.weight'''] a :Optional[Any] = checkpoint[F'''{old_prefix}.out_layers.0.bias'''] a :Optional[Any] = checkpoint[F'''{old_prefix}.out_layers.3.weight'''] a :List[Any] = checkpoint[F'''{old_prefix}.out_layers.3.bias'''] if has_skip: a :Optional[int] = 
checkpoint[F'''{old_prefix}.skip_connection.weight'''] a :str = checkpoint[F'''{old_prefix}.skip_connection.bias'''] return new_checkpoint def __lowerCamelCase ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=None ): """simple docstring""" a , a , a :List[Any] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 ) a , a , a :Any = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 ) a :Union[str, Any] = checkpoint[F'''{old_prefix}.norm.weight'''] a :Union[str, Any] = checkpoint[F'''{old_prefix}.norm.bias'''] a :int = weight_q.squeeze(-1 ).squeeze(-1 ) a :Any = bias_q.squeeze(-1 ).squeeze(-1 ) a :Union[str, Any] = weight_k.squeeze(-1 ).squeeze(-1 ) a :str = bias_k.squeeze(-1 ).squeeze(-1 ) a :List[str] = weight_v.squeeze(-1 ).squeeze(-1 ) a :List[str] = bias_v.squeeze(-1 ).squeeze(-1 ) a :Dict = ( checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 ) ) a :int = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ): """simple docstring""" a :Any = torch.load(UpperCAmelCase_ , map_location='''cpu''' ) a :Optional[int] = {} a :Optional[int] = checkpoint['''time_embed.0.weight'''] a :Optional[int] = checkpoint['''time_embed.0.bias'''] a :Any = checkpoint['''time_embed.2.weight'''] a :List[Any] = checkpoint['''time_embed.2.bias'''] if unet_config["num_class_embeds"] is not None: a :Optional[Any] = checkpoint['''label_emb.weight'''] a :Optional[int] = checkpoint['''input_blocks.0.0.weight'''] a :List[Any] = checkpoint['''input_blocks.0.0.bias'''] a :List[str] = unet_config['''down_block_types'''] a :Optional[int] = unet_config['''layers_per_block'''] a :int = unet_config['''attention_head_dim'''] a :Optional[int] = unet_config['''block_out_channels'''] a :Union[str, Any] = 1 a :Optional[Any] = channels_list[0] for i, layer_type in enumerate(UpperCAmelCase_ ): a :str = channels_list[i] a :int = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(UpperCAmelCase_ ): a :Dict = F'''down_blocks.{i}.resnets.{j}''' a :Optional[int] = F'''input_blocks.{current_layer}.0''' a :Dict = True if j == 0 and downsample_block_has_skip else False a :Dict = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(UpperCAmelCase_ ): a :Any = F'''down_blocks.{i}.resnets.{j}''' a :Dict = F'''input_blocks.{current_layer}.0''' a :Optional[Any] = True if j == 0 and downsample_block_has_skip else False a :Union[str, Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) a :Tuple = F'''down_blocks.{i}.attentions.{j}''' a :Union[str, Any] = F'''input_blocks.{current_layer}.1''' a :Optional[int] = convert_attention( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) current_layer += 1 if i != len(UpperCAmelCase_ ) - 1: a :int = F'''down_blocks.{i}.downsamplers.0''' a :List[str] = F'''input_blocks.{current_layer}.0''' a :List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) current_layer += 1 a :Union[str, Any] = current_channels # hardcoded the mid-block for now a :List[str] = '''mid_block.resnets.0''' a :Any = '''middle_block.0''' a :Union[str, Any] = 
convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) a :int = '''mid_block.attentions.0''' a :Any = '''middle_block.1''' a :Union[str, Any] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) a :int = '''mid_block.resnets.1''' a :Union[str, Any] = '''middle_block.2''' a :Dict = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) a :int = 0 a :Any = unet_config['''up_block_types'''] for i, layer_type in enumerate(UpperCAmelCase_ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): a :Any = F'''up_blocks.{i}.resnets.{j}''' a :str = F'''output_blocks.{current_layer}.0''' a :Dict = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) current_layer += 1 if i != len(UpperCAmelCase_ ) - 1: a :str = F'''up_blocks.{i}.upsamplers.0''' a :Any = F'''output_blocks.{current_layer-1}.1''' a :List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): a :Tuple = F'''up_blocks.{i}.resnets.{j}''' a :Tuple = F'''output_blocks.{current_layer}.0''' a :List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) a :List[str] = F'''up_blocks.{i}.attentions.{j}''' a :Dict = F'''output_blocks.{current_layer}.1''' a :List[str] = convert_attention( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) current_layer += 1 if i != len(UpperCAmelCase_ ) - 1: a :Optional[int] = F'''up_blocks.{i}.upsamplers.0''' a :Optional[Any] = F'''output_blocks.{current_layer-1}.2''' a :Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) a :Optional[Any] = checkpoint['''out.0.weight'''] a :List[Any] = checkpoint['''out.0.bias'''] a :Tuple = checkpoint['''out.2.weight'''] a :List[str] = checkpoint['''out.2.bias'''] return new_checkpoint if __name__ == "__main__": snake_case : List[Any] = argparse.ArgumentParser() parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''') parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.''' ) parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''') snake_case : Union[str, Any] = parser.parse_args() snake_case : int = strabool(args.class_cond) snake_case : Optional[Any] = os.path.basename(args.unet_path) print(F"""Checkpoint: {ckpt_name}""") # Get U-Net config if "imagenet64" in ckpt_name: snake_case : Dict = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): snake_case : Union[str, Any] = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: snake_case : Any = TEST_UNET_CONFIG else: raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""") if not args.class_cond: snake_case : Optional[Any] = None snake_case : Optional[int] = con_pt_to_diffuser(args.unet_path, unet_config) snake_case : Tuple = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: snake_case : Union[str, Any] = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: snake_case : str = 
CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): snake_case : Optional[Any] = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""") snake_case : Optional[int] = CMStochasticIterativeScheduler(**scheduler_config) snake_case : Any = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
94
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
29
0
"""simple docstring""" import sys _A = ( """73167176531330624919225119674426574742355349194934""" """96983520312774506326239578318016984801869478851843""" """85861560789112949495459501737958331952853208805511""" """12540698747158523863050715693290963295227443043557""" """66896648950445244523161731856403098711121722383113""" """62229893423380308135336276614282806444486645238749""" """30358907296290491560440772390713810515859307960866""" """70172427121883998797908792274921901699720888093776""" """65727333001053367881220235421809751254540594752243""" """52584907711670556013604839586446706324415722155397""" """53697817977846174064955149290862569321978468622482""" """83972241375657056057490261407972968652414535100474""" """82166370484403199890008895243450658541227588666881""" """16427171479924442928230863465674813919123162824586""" """17866458359124566529476545682848912883142607690042""" """24219022671055626321111109370544217506941658960408""" """07198403850962455444362981230987879927244284909188""" """84580156166097919133875499200524063689912560717606""" """05886116467109405077541002256983155200055935729725""" """71636269561882670428252483600823257530420752963450""" ) def a__ ( lowerCAmelCase = N ) -> int: UpperCAmelCase__ : List[str] = -sys.maxsize - 1 for i in range(len(lowerCAmelCase ) - 12 ): UpperCAmelCase__ : Optional[Any] = 1 for j in range(13 ): product *= int(n[i + j] ) if product > largest_product: UpperCAmelCase__ : str = product return largest_product if __name__ == "__main__": print(f'''{solution() = }''')
166
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = dataset UpperCAmelCase__ : Union[str, Any] = process UpperCAmelCase__ : List[Any] = params def __len__(self ): """simple docstring""" return len(self.dataset ) def __getitem__(self , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : int = self.dataset[i] UpperCAmelCase__ : Any = self.process(_lowerCamelCase , **self.params ) return processed class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ): """simple docstring""" UpperCAmelCase__ : Tuple = loader UpperCAmelCase__ : int = infer UpperCAmelCase__ : Optional[Any] = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : Any = loader_batch_size # Internal bookkeeping UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : Union[str, Any] = None def __len__(self ): """simple docstring""" return len(self.loader ) def __iter__(self ): """simple docstring""" UpperCAmelCase__ : List[str] = iter(self.loader ) return self def _a (self ): """simple docstring""" if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice UpperCAmelCase__ : Optional[int] = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) UpperCAmelCase__ : List[str] = {} for k, element in self._loader_batch_data.items(): if isinstance(_lowerCamelCase , _lowerCamelCase ): # Convert ModelOutput to tuple first UpperCAmelCase__ : List[Any] = element.to_tuple() if isinstance(element[0] , torch.Tensor ): UpperCAmelCase__ : str = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): UpperCAmelCase__ : List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_lowerCamelCase , _lowerCamelCase ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): UpperCAmelCase__ : Optional[Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): UpperCAmelCase__ : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around UpperCAmelCase__ : str = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers UpperCAmelCase__ : Tuple = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers UpperCAmelCase__ : Dict = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
UpperCAmelCase__ : Optional[Any] = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 UpperCAmelCase__ : Union[str, Any] = self._loader_batch_data.__class__(_lowerCamelCase ) self._loader_batch_index += 1 return result def _a (self ): """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch UpperCAmelCase__ : str = next(self.iterator ) UpperCAmelCase__ : Union[str, Any] = self.infer(_lowerCamelCase , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_lowerCamelCase , torch.Tensor ): UpperCAmelCase__ : List[Any] = processed else: UpperCAmelCase__ : List[str] = list(processed.keys() )[0] UpperCAmelCase__ : List[str] = processed[key] if isinstance(_lowerCamelCase , _lowerCamelCase ): UpperCAmelCase__ : Any = len(_lowerCamelCase ) else: UpperCAmelCase__ : Union[str, Any] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. UpperCAmelCase__ : Optional[int] = observed_batch_size # Setting internal index to unwrap the batch UpperCAmelCase__ : List[Any] = processed UpperCAmelCase__ : Optional[int] = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ): """simple docstring""" super().__init__(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def __iter__(self ): """simple docstring""" UpperCAmelCase__ : Tuple = iter(self.loader ) UpperCAmelCase__ : List[Any] = None return self def _a (self ): """simple docstring""" if self.subiterator is None: UpperCAmelCase__ : Optional[Any] = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item UpperCAmelCase__ : List[str] = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators UpperCAmelCase__ : Optional[Any] = self.infer(next(self.iterator ) , **self.params ) UpperCAmelCase__ : List[str] = next(self.subiterator ) return processed class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __iter__(self ): """simple docstring""" UpperCAmelCase__ : str = iter(self.loader ) return self def _a (self ): """simple docstring""" UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: UpperCAmelCase__ : List[Any] = self.loader_batch_item() UpperCAmelCase__ : Dict = item.pop("""is_last""" ) accumulator.append(_lowerCamelCase ) if is_last: return accumulator while not is_last: UpperCAmelCase__ : List[str] = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_lowerCamelCase , torch.Tensor ): UpperCAmelCase__ : Dict = processed else: UpperCAmelCase__ : List[Any] = list(processed.keys() )[0] UpperCAmelCase__ : List[Any] = processed[key] if isinstance(_lowerCamelCase , _lowerCamelCase ): UpperCAmelCase__ : int = len(_lowerCamelCase ) else: UpperCAmelCase__ : List[Any] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. UpperCAmelCase__ : str = observed_batch_size UpperCAmelCase__ : Union[str, Any] = processed UpperCAmelCase__ : List[Any] = 0 while self._loader_batch_index < self.loader_batch_size: UpperCAmelCase__ : Union[str, Any] = self.loader_batch_item() UpperCAmelCase__ : int = item.pop("""is_last""" ) accumulator.append(_lowerCamelCase ) if is_last: return accumulator else: UpperCAmelCase__ : Any = processed UpperCAmelCase__ : Any = item.pop("""is_last""" ) accumulator.append(_lowerCamelCase ) return accumulator class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__(self , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : List[Any] = dataset UpperCAmelCase__ : Union[str, Any] = key def __len__(self ): """simple docstring""" return len(self.dataset ) def __getitem__(self , _lowerCamelCase ): """simple docstring""" return self.dataset[i][self.key] class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : int = dataset UpperCAmelCase__ : Any = keya UpperCAmelCase__ : str = keya def __len__(self ): """simple docstring""" return len(self.dataset ) def __getitem__(self , _lowerCamelCase ): """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
166
1
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class __UpperCamelCase ( unittest.TestCase ): def __a ( self ) -> int: a : Tuple = inspect.getfile(accelerate.test_utils ) a : List[Any] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps", "test_metrics.py"] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 a : Any = test_metrics @require_cpu def __a ( self ) -> Tuple: debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def __a ( self ) -> List[Any]: debug_launcher(self.test_metrics.main ) @require_single_gpu def __a ( self ) -> Optional[int]: self.test_metrics.main() @require_multi_gpu def __a ( self ) -> Optional[int]: print(f"""Found {torch.cuda.device_count()} devices.""" ) a : Optional[int] = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowerCAmelCase__ , env=os.environ.copy() )
105
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
330
0
'''simple docstring'''
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
190
'''simple docstring'''
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
190
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
"""Find the strongly connected components of a directed graph (Kosaraju's algorithm)."""

test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    # Depth-first search on the original graph, recording vertices in
    # order of completion (reverse postorder once the list is reversed).
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    # Depth-first search on the reversed graph; everything reachable here
    # belongs to the same strongly connected component.
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
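A usage sketch for the routine above on its own test graphs; vertices {0, 1, 2} form a cycle in both graphs, and 3, 4, 5 form a second cycle in the second one, so they collapse into single components (element order reflects DFS traversal):

print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]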
62
1
import argparse
import os

import torch

from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint,
    # so they are excluded from the parameter-sum comparison.
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        # Codebook weights live under the image_codebook prefix in the HF model.
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak the original model's weights to the transformers design.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")

    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
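An invocation sketch for the restored entry point; all paths below are hypothetical placeholders, not real files:

# Hypothetical paths -- substitute a real FLAVA checkpoint and codebook.
convert_flava_checkpoint(
    checkpoint_path="flava_full.pt",        # assumed local file or URL
    codebook_path="flava_codebook.pt",      # assumed local file
    pytorch_dump_folder_path="./flava-hf",  # output directory for save_pretrained
    config_path=None,                       # falls back to the default FlavaConfig
)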
125
import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def __lowerCamelCase ( snake_case__ ,snake_case__="shi-labs/oneformer_demo" ) -> Union[str, Any]: """simple docstring""" with open(hf_hub_download(snake_case__ ,snake_case__ ,repo_type="""dataset""" ) ,"""r""" ) as f: _SCREAMING_SNAKE_CASE = json.load(snake_case__ ) _SCREAMING_SNAKE_CASE = {} _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] for key, info in class_info.items(): _SCREAMING_SNAKE_CASE = info["""name"""] class_names.append(info["""name"""] ) if info["isthing"]: thing_ids.append(int(snake_case__ ) ) _SCREAMING_SNAKE_CASE = thing_ids _SCREAMING_SNAKE_CASE = class_names return metadata class __UpperCAmelCase (unittest.TestCase ): def __init__( self: List[Any] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[Any]=7 , UpperCAmelCase_: Union[str, Any]=3 , UpperCAmelCase_: Optional[int]=30 , UpperCAmelCase_: List[str]=400 , UpperCAmelCase_: List[str]=None , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase_: int=[0.5, 0.5, 0.5] , UpperCAmelCase_: List[str]=10 , UpperCAmelCase_: Optional[int]=False , UpperCAmelCase_: Optional[int]=255 , UpperCAmelCase_: Tuple="shi-labs/oneformer_demo" , UpperCAmelCase_: Union[str, Any]="ade20k_panoptic.json" , UpperCAmelCase_: Union[str, Any]=10 , ): '''simple docstring''' _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = batch_size _SCREAMING_SNAKE_CASE = num_channels _SCREAMING_SNAKE_CASE = min_resolution _SCREAMING_SNAKE_CASE = max_resolution _SCREAMING_SNAKE_CASE = do_resize _SCREAMING_SNAKE_CASE = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size _SCREAMING_SNAKE_CASE = do_normalize _SCREAMING_SNAKE_CASE = image_mean _SCREAMING_SNAKE_CASE = image_std _SCREAMING_SNAKE_CASE = class_info_file _SCREAMING_SNAKE_CASE = prepare_metadata(UpperCAmelCase_ , UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = num_text _SCREAMING_SNAKE_CASE = repo_path # for the post_process_functions _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = 10 _SCREAMING_SNAKE_CASE = 10 _SCREAMING_SNAKE_CASE = 3 _SCREAMING_SNAKE_CASE = 4 _SCREAMING_SNAKE_CASE = num_labels _SCREAMING_SNAKE_CASE = do_reduce_labels _SCREAMING_SNAKE_CASE = ignore_index def UpperCamelCase ( self: Optional[int] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def UpperCamelCase ( self: int , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: List[str]=False ): '''simple docstring''' if not batched: _SCREAMING_SNAKE_CASE = image_inputs[0] if isinstance(UpperCAmelCase_ , Image.Image ): 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = image.size else: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2] if w < h: _SCREAMING_SNAKE_CASE = int(self.size["""shortest_edge"""] * h / w ) _SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""] elif w > h: _SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""] _SCREAMING_SNAKE_CASE = int(self.size["""shortest_edge"""] * w / h ) else: _SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""] _SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""] else: _SCREAMING_SNAKE_CASE = [] for image in image_inputs: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _SCREAMING_SNAKE_CASE = max(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : item[0] )[0] _SCREAMING_SNAKE_CASE = max(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : item[1] )[1] return expected_height, expected_width def UpperCamelCase ( self: Any ): '''simple docstring''' return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): __snake_case : Union[str, Any] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string __snake_case : int = image_processing_class def UpperCamelCase ( self: Optional[int] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = OneFormerImageProcessorTester(self ) @property def UpperCamelCase ( self: int ): '''simple docstring''' return self.image_processing_tester.prepare_image_processor_dict() def UpperCamelCase ( self: int ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ , """image_mean""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """image_std""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """do_normalize""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """ignore_index""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """class_info_file""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """num_text""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """repo_path""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """metadata""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """do_reduce_labels""" ) ) def UpperCamelCase ( self: Optional[int] ): '''simple docstring''' pass def UpperCamelCase ( self: Optional[Any] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , Image.Image ) # Test not batched input _SCREAMING_SNAKE_CASE = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = image_processor( UpperCAmelCase_ , ["""semantic"""] * len(UpperCAmelCase_ ) , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase ( self: int ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) # Test not batched input _SCREAMING_SNAKE_CASE = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = image_processor( UpperCAmelCase_ , ["""semantic"""] * len(UpperCAmelCase_ ) , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase ( self: Tuple ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) # Test not batched input _SCREAMING_SNAKE_CASE = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = image_processor( UpperCAmelCase_ , ["""semantic"""] * len(UpperCAmelCase_ ) , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Tuple=False , UpperCAmelCase_: Any=False , UpperCAmelCase_: str="np" ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # prepare image and target _SCREAMING_SNAKE_CASE = self.image_processing_tester.num_labels _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase_ ) if with_segmentation_maps: _SCREAMING_SNAKE_CASE = num_labels if is_instance_map: _SCREAMING_SNAKE_CASE = list(range(UpperCAmelCase_ ) ) * 2 _SCREAMING_SNAKE_CASE = 
dict(enumerate(UpperCAmelCase_ ) ) _SCREAMING_SNAKE_CASE = [ np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": _SCREAMING_SNAKE_CASE = [Image.fromarray(UpperCAmelCase_ ) for annotation in annotations] _SCREAMING_SNAKE_CASE = image_processor( UpperCAmelCase_ , ["""semantic"""] * len(UpperCAmelCase_ ) , UpperCAmelCase_ , return_tensors="""pt""" , instance_id_to_semantic_id=UpperCAmelCase_ , pad_and_return_pixel_mask=UpperCAmelCase_ , ) return inputs def UpperCamelCase ( self: Union[str, Any] ): '''simple docstring''' pass def UpperCamelCase ( self: Any ): '''simple docstring''' def common(UpperCAmelCase_: List[str]=False , UpperCAmelCase_: Optional[int]=None ): _SCREAMING_SNAKE_CASE = self.comm_get_image_processor_inputs( with_segmentation_maps=UpperCAmelCase_ , is_instance_map=UpperCAmelCase_ , segmentation_type=UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = inputs["""mask_labels"""] _SCREAMING_SNAKE_CASE = inputs["""class_labels"""] _SCREAMING_SNAKE_CASE = inputs["""pixel_values"""] _SCREAMING_SNAKE_CASE = inputs["""text_inputs"""] # check the batch_size for mask_label, class_label, text_input in zip(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): self.assertEqual(mask_label.shape[0] , class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(UpperCAmelCase_ ) , self.image_processing_tester.num_text ) common() common(is_instance_map=UpperCAmelCase_ ) common(is_instance_map=UpperCAmelCase_ , segmentation_type="""pil""" ) common(is_instance_map=UpperCAmelCase_ , segmentation_type="""pil""" ) def UpperCamelCase ( self: Any ): '''simple docstring''' _SCREAMING_SNAKE_CASE = np.zeros((20, 50) ) _SCREAMING_SNAKE_CASE = 1 _SCREAMING_SNAKE_CASE = 1 _SCREAMING_SNAKE_CASE = 1 _SCREAMING_SNAKE_CASE = binary_mask_to_rle(UpperCAmelCase_ ) self.assertEqual(len(UpperCAmelCase_ ) , 4 ) self.assertEqual(rle[0] , 21 ) self.assertEqual(rle[1] , 45 ) def UpperCamelCase ( self: str ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , ) _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_fake_oneformer_outputs() _SCREAMING_SNAKE_CASE = fature_extractor.post_process_semantic_segmentation(UpperCAmelCase_ ) self.assertEqual(len(UpperCAmelCase_ ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) _SCREAMING_SNAKE_CASE = [(1, 4) for i in range(self.image_processing_tester.batch_size )] _SCREAMING_SNAKE_CASE = fature_extractor.post_process_semantic_segmentation(UpperCAmelCase_ , target_sizes=UpperCAmelCase_ ) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def UpperCamelCase ( self: Union[str, Any] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , ) _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_fake_oneformer_outputs() _SCREAMING_SNAKE_CASE = 
image_processor.post_process_instance_segmentation(UpperCAmelCase_ , threshold=0 ) self.assertTrue(len(UpperCAmelCase_ ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("""segmentation""" in el ) self.assertTrue("""segments_info""" in el ) self.assertEqual(type(el["""segments_info"""] ) , UpperCAmelCase_ ) self.assertEqual( el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def UpperCamelCase ( self: List[Any] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , ) _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_fake_oneformer_outputs() _SCREAMING_SNAKE_CASE = image_processor.post_process_panoptic_segmentation(UpperCAmelCase_ , threshold=0 ) self.assertTrue(len(UpperCAmelCase_ ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("""segmentation""" in el ) self.assertTrue("""segments_info""" in el ) self.assertEqual(type(el["""segments_info"""] ) , UpperCAmelCase_ ) self.assertEqual( el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
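The binary_mask_to_rle checks in the test above pin down the encoding: pairs of (1-indexed run start, run length) over the flattened mask. The exact mask assignments were lost in the sample, so the layout below is an assumed one that reproduces the asserted values 21 and 45 with four entries in total; rle_pairs is a from-scratch sketch, not the transformers implementation:

import numpy as np


def rle_pairs(mask: np.ndarray) -> list[int]:
    # (1-indexed start, run length) pairs for each run of ones in the
    # flattened mask, matching the [21, 45, ...] values asserted above.
    pixels = np.concatenate([[0], mask.flatten(), [0]])
    changes = np.where(pixels[1:] != pixels[:-1])[0] + 1
    changes[1::2] -= changes[::2]  # turn run ends into run lengths
    return list(changes)


mask = np.zeros((20, 50))
mask[0, 20:] = 1  # runs into...
mask[1, :15] = 1  # ...one contiguous run of 45 once flattened
mask[5, :10] = 1  # a second, separate run
print(rle_pairs(mask))  # [21, 45, 251, 10]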
125
1
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = """▁""" lowercase_ = { """vocab_file""": """vocab.json""", """spm_file""": """sentencepiece.bpe.model""", """tokenizer_config_file""": """tokenizer_config.json""", } lowercase_ = { """vocab_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""", }, """spm_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""", }, """tokenizer_config_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""", }, } lowercase_ = { """facebook/m2m100_418M""": 1_024, } # fmt: off lowercase_ = { """m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""], """wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""] } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = ['''input_ids''', '''attention_mask'''] lowerCAmelCase_ = [] lowerCAmelCase_ = [] def __init__( self : Optional[Any] , _A : Optional[int] , _A : Any , _A : Any=None , _A : Union[str, Any]=None , _A : Dict="<s>" , _A : Dict="</s>" , _A : Optional[int]="</s>" , _A : Optional[int]="<pad>" , _A : Tuple="<unk>" , _A : Tuple="m2m100" , _A : Optional[Dict[str, Any]] = None , _A : int=8 , **_A : Dict , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs __SCREAMING_SNAKE_CASE : int = language_codes __SCREAMING_SNAKE_CASE : List[str] = FAIRSEQ_LANGUAGE_CODES[language_codes] __SCREAMING_SNAKE_CASE : Dict = {lang_code: F'''__{lang_code}__''' for lang_code in fairseq_language_code} __SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(_A ) for 
lang_code in fairseq_language_code if self.get_lang_token(_A ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=_A , tgt_lang=_A , bos_token=_A , eos_token=_A , sep_token=_A , unk_token=_A , pad_token=_A , language_codes=_A , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=_A , **_A , ) __SCREAMING_SNAKE_CASE : List[Any] = vocab_file __SCREAMING_SNAKE_CASE : Tuple = load_json(_A ) __SCREAMING_SNAKE_CASE : List[str] = {v: k for k, v in self.encoder.items()} __SCREAMING_SNAKE_CASE : int = spm_file __SCREAMING_SNAKE_CASE : Optional[int] = load_spm(_A , self.sp_model_kwargs ) __SCREAMING_SNAKE_CASE : Optional[int] = len(self.encoder ) __SCREAMING_SNAKE_CASE : Optional[int] = { self.get_lang_token(_A ): self.encoder_size + i for i, lang_code in enumerate(_A ) } __SCREAMING_SNAKE_CASE : Tuple = {lang_code: self.encoder_size + i for i, lang_code in enumerate(_A )} __SCREAMING_SNAKE_CASE : Optional[Any] = {v: k for k, v in self.lang_token_to_id.items()} __SCREAMING_SNAKE_CASE : str = src_lang if src_lang is not None else '''en''' __SCREAMING_SNAKE_CASE : str = tgt_lang __SCREAMING_SNAKE_CASE : List[str] = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) __SCREAMING_SNAKE_CASE : int = num_madeup_words @property def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return len(self.encoder ) + len(self.lang_token_to_id ) @property def UpperCAmelCase__ ( self : int ): """simple docstring""" return self._src_lang @src_lang.setter def UpperCAmelCase__ ( self : List[str] , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def UpperCAmelCase__ ( self : int , _A : str ): """simple docstring""" return self.sp_model.encode(_A , out_type=_A ) def UpperCAmelCase__ ( self : Tuple , _A : Tuple ): """simple docstring""" if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(_A , self.encoder[self.unk_token] ) def UpperCAmelCase__ ( self : List[str] , _A : int ): """simple docstring""" if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(_A , self.unk_token ) def UpperCAmelCase__ ( self : Optional[Any] , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = [] __SCREAMING_SNAKE_CASE : List[Any] = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_A ) + token __SCREAMING_SNAKE_CASE : Any = [] else: current_sub_tokens.append(_A ) out_string += self.sp_model.decode(_A ) return out_string.strip() def UpperCAmelCase__ ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) __SCREAMING_SNAKE_CASE : List[Any] = [1] * len(self.prefix_tokens ) __SCREAMING_SNAKE_CASE : int = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_A )) + suffix_ones return prefix_ones + ([0] * len(_A )) + ([0] * len(_A )) + suffix_ones def UpperCAmelCase__ ( self : List[str] , _A : List[int] , _A : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return 
self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.__dict__.copy() __SCREAMING_SNAKE_CASE : Dict = None return state def __setstate__( self : Union[str, Any] , _A : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __SCREAMING_SNAKE_CASE : List[Any] = {} __SCREAMING_SNAKE_CASE : List[Any] = load_spm(self.spm_file , self.sp_model_kwargs ) def UpperCAmelCase__ ( self : Union[str, Any] , _A : str , _A : Optional[str] = None ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = Path(_A ) if not save_dir.is_dir(): raise OSError(F'''{save_directory} should be a directory''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) __SCREAMING_SNAKE_CASE : Tuple = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , _A ) if os.path.abspath(self.spm_file ) != os.path.abspath(_A ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , _A ) elif not os.path.isfile(self.spm_file ): with open(_A , '''wb''' ) as fi: __SCREAMING_SNAKE_CASE : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(_A ) return (str(_A ), str(_A )) def UpperCAmelCase__ ( self : int , _A : List[str] , _A : str = "en" , _A : Optional[List[str]] = None , _A : str = "ro" , **_A : Union[str, Any] , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = src_lang __SCREAMING_SNAKE_CASE : Optional[Any] = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(_A , _A , **_A ) def UpperCAmelCase__ ( self : str , _A : List[Any] , _A : Optional[str] , _A : Optional[str] , **_A : Tuple ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) __SCREAMING_SNAKE_CASE : Dict = src_lang __SCREAMING_SNAKE_CASE : List[Any] = self(_A , add_special_tokens=_A , **_A ) __SCREAMING_SNAKE_CASE : Dict = self.get_lang_id(_A ) __SCREAMING_SNAKE_CASE : str = tgt_lang_id return inputs def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" self.set_src_lang_special_tokens(self.src_lang ) def UpperCAmelCase__ ( self : str ): """simple docstring""" self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCAmelCase__ ( self : Tuple , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_lang_token(_A ) __SCREAMING_SNAKE_CASE : Dict = self.lang_token_to_id[lang_token] __SCREAMING_SNAKE_CASE : Dict = [self.cur_lang_id] __SCREAMING_SNAKE_CASE : int = [self.eos_token_id] def UpperCAmelCase__ ( self : Optional[Any] , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.get_lang_token(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = self.lang_token_to_id[lang_token] __SCREAMING_SNAKE_CASE : Tuple = [self.cur_lang_id] __SCREAMING_SNAKE_CASE : Dict = [self.eos_token_id] def UpperCAmelCase__ ( self : Optional[int] , _A : str ): """simple docstring""" return self.lang_code_to_token[lang] def UpperCAmelCase__ ( self : Dict , 
_A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.get_lang_token(_A ) return self.lang_token_to_id[lang_token] def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = sentencepiece.SentencePieceProcessor(**snake_case ) spm.Load(str(snake_case ) ) return spm def a__ ( snake_case ): """simple docstring""" with open(snake_case , '''r''' ) as f: return json.load(snake_case ) def a__ ( snake_case , snake_case ): """simple docstring""" with open(snake_case , '''w''' ) as f: json.dump(snake_case , snake_case , indent=2 )
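A hedged usage sketch for the tokenizer above via the public facebook/m2m100_418M checkpoint it references; get_lang_id is the method defined in the file, and running this downloads model weights:

from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

inputs = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
# Force the decoder to start with the target-language token, here French.
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))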
303
"""
One of several implementations of the Lempel-Ziv-Welch compression algorithm
https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch
"""

import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    # Read the file as bytes and return them as one long bit string.
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    # Replace curr_string with its two one-bit extensions in the lexicon.
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        # Code length grew by one bit; left-pad every existing code.
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    # Compress the given bit string with LZW and return the result.
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    # Prepend the source file's length (Elias gamma coded) to the output.
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    # Write the bit string as bytes, with a stop marker in the final byte.
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (byte_length - len(result_byte_array[-1]) - 1)

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
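A small driver for the compressor above; the temp-file paths and payload are arbitrary stand-ins:

import os
import tempfile

# Hypothetical paths -- any readable source file works.
src = os.path.join(tempfile.gettempdir(), "lzw_demo_source.bin")
dst = os.path.join(tempfile.gettempdir(), "lzw_demo_compressed.bin")

with open(src, "wb") as f:
    f.write(b"abababababababab" * 64)  # repetitive data compresses well

compress(src, dst)
print(os.path.getsize(src), "->", os.path.getsize(dst), "bytes")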
303
1
"""simple docstring""" import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig _lowercase : int = logging.get_logger(__name__) # General docstring _lowercase : List[Any] = """PoolFormerConfig""" # Base docstring _lowercase : Optional[int] = """sail/poolformer_s12""" _lowercase : Union[str, Any] = [1, 5_1_2, 7, 7] # Image classification docstring _lowercase : Tuple = """sail/poolformer_s12""" _lowercase : Tuple = """tabby, tabby cat""" _lowercase : str = [ """sail/poolformer_s12""", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : List[str] = 0.0 , __lowerCamelCase : str = False ): """simple docstring""" if drop_prob == 0.0 or not training: return input lowerCamelCase__ : List[str] =1 - drop_prob lowerCamelCase__ : Any =(input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets lowerCamelCase__ : Dict =keep_prob + torch.rand(__lowerCamelCase , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize lowerCamelCase__ : List[str] =input.div(__lowerCamelCase ) * random_tensor return output class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : Any, lowerCamelCase : Union[str, Any] = None )-> Dict: super().__init__() lowerCamelCase__ : Optional[int] =drop_prob def snake_case ( self : List[str], lowerCamelCase : Tuple )-> str: return drop_path(__lowercase, self.drop_prob, self.training ) def snake_case ( self : Union[str, Any] )-> List[str]: return "p={}".format(self.drop_prob ) class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : List[Any], lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : Union[str, Any], lowerCamelCase : Any, lowerCamelCase : Any, lowerCamelCase : Optional[Any]=None )-> str: super().__init__() lowerCamelCase__ : Optional[Any] =patch_size if isinstance(__lowercase, collections.abc.Iterable ) else (patch_size, patch_size) lowerCamelCase__ : int =stride if isinstance(__lowercase, collections.abc.Iterable ) else (stride, stride) lowerCamelCase__ : Dict =padding if isinstance(__lowercase, collections.abc.Iterable ) else (padding, padding) lowerCamelCase__ : str =nn.Convad(__lowercase, __lowercase, kernel_size=__lowercase, stride=__lowercase, padding=__lowercase ) lowerCamelCase__ : Tuple =norm_layer(__lowercase ) if norm_layer else nn.Identity() def snake_case ( self : Optional[int], lowerCamelCase : Any )-> Dict: lowerCamelCase__ : Union[str, Any] =self.projection(__lowercase ) lowerCamelCase__ : Dict =self.norm(__lowercase ) return embeddings class __SCREAMING_SNAKE_CASE ( nn.GroupNorm ): '''simple docstring''' def __init__( self : List[Any], lowerCamelCase : Optional[Any], **lowerCamelCase : Union[str, Any] )-> Optional[int]: super().__init__(1, __lowercase, **__lowercase ) class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : List[str], lowerCamelCase : Optional[Any] )-> List[str]: super().__init__() lowerCamelCase__ : List[Any] 
=nn.AvgPoolad(__lowercase, stride=1, padding=pool_size // 2, count_include_pad=__lowercase ) def snake_case ( self : int, lowerCamelCase : str )-> Optional[Any]: return self.pool(__lowercase ) - hidden_states class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : Dict, lowerCamelCase : Optional[Any], lowerCamelCase : int, lowerCamelCase : Optional[int], lowerCamelCase : Optional[int] )-> int: super().__init__() lowerCamelCase__ : Tuple =nn.Convad(__lowercase, __lowercase, 1 ) lowerCamelCase__ : Tuple =nn.Convad(__lowercase, __lowercase, 1 ) lowerCamelCase__ : Optional[int] =PoolFormerDropPath(__lowercase ) if isinstance(config.hidden_act, __lowercase ): lowerCamelCase__ : Tuple =ACTaFN[config.hidden_act] else: lowerCamelCase__ : Any =config.hidden_act def snake_case ( self : Optional[int], lowerCamelCase : Optional[int] )-> int: lowerCamelCase__ : Optional[int] =self.conva(__lowercase ) lowerCamelCase__ : Dict =self.act_fn(__lowercase ) lowerCamelCase__ : int =self.drop(__lowercase ) lowerCamelCase__ : Dict =self.conva(__lowercase ) lowerCamelCase__ : str =self.drop(__lowercase ) return hidden_states class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : Tuple, lowerCamelCase : Any, lowerCamelCase : Tuple, lowerCamelCase : str, lowerCamelCase : Tuple, lowerCamelCase : Tuple, lowerCamelCase : List[Any] )-> Tuple: super().__init__() lowerCamelCase__ : List[str] =PoolFormerPooling(__lowercase ) lowerCamelCase__ : Dict =PoolFormerOutput(__lowercase, __lowercase, __lowercase, __lowercase ) lowerCamelCase__ : str =PoolFormerGroupNorm(__lowercase ) lowerCamelCase__ : List[str] =PoolFormerGroupNorm(__lowercase ) # Useful for training neural nets lowerCamelCase__ : str =PoolFormerDropPath(__lowercase ) if drop_path > 0.0 else nn.Identity() lowerCamelCase__ : Optional[int] =config.use_layer_scale if config.use_layer_scale: lowerCamelCase__ : int =nn.Parameter( config.layer_scale_init_value * torch.ones((__lowercase) ), requires_grad=__lowercase ) lowerCamelCase__ : Optional[int] =nn.Parameter( config.layer_scale_init_value * torch.ones((__lowercase) ), requires_grad=__lowercase ) def snake_case ( self : Dict, lowerCamelCase : List[Any] )-> Union[str, Any]: if self.use_layer_scale: lowerCamelCase__ : Optional[int] =self.pooling(self.before_norm(__lowercase ) ) lowerCamelCase__ : Union[str, Any] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection lowerCamelCase__ : Dict =hidden_states + self.drop_path(__lowercase ) lowerCamelCase__ : str =() lowerCamelCase__ : Any =self.output(self.after_norm(__lowercase ) ) lowerCamelCase__ : List[str] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection lowerCamelCase__ : Optional[Any] =hidden_states + self.drop_path(__lowercase ) lowerCamelCase__ : int =(output,) + outputs return outputs else: lowerCamelCase__ : Optional[int] =self.drop_path(self.pooling(self.before_norm(__lowercase ) ) ) # First residual connection lowerCamelCase__ : str =pooling_output + hidden_states lowerCamelCase__ : Optional[Any] =() # Second residual connection inside the PoolFormerOutput block lowerCamelCase__ : Optional[int] =self.drop_path(self.output(self.after_norm(__lowercase ) ) ) lowerCamelCase__ : Union[str, Any] =hidden_states + layer_output lowerCamelCase__ : Tuple =(output,) + outputs return outputs class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : Dict, lowerCamelCase : Union[str, Any] )-> 
List[str]: super().__init__() lowerCamelCase__ : Tuple =config # stochastic depth decay rule lowerCamelCase__ : str =[x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths ) )] # patch embeddings lowerCamelCase__ : Union[str, Any] =[] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i], stride=config.strides[i], padding=config.padding[i], num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1], hidden_size=config.hidden_sizes[i], ) ) lowerCamelCase__ : Any =nn.ModuleList(__lowercase ) # Transformer blocks lowerCamelCase__ : List[Any] =[] lowerCamelCase__ : Tuple =0 for i in range(config.num_encoder_blocks ): # each block consists of layers lowerCamelCase__ : str =[] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( __lowercase, num_channels=config.hidden_sizes[i], pool_size=config.pool_size, hidden_size=config.hidden_sizes[i], intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ), drop_path=dpr[cur + j], ) ) blocks.append(nn.ModuleList(__lowercase ) ) lowerCamelCase__ : Dict =nn.ModuleList(__lowercase ) def snake_case ( self : Optional[int], lowerCamelCase : List[Any], lowerCamelCase : Union[str, Any]=False, lowerCamelCase : Tuple=True )-> Union[str, Any]: lowerCamelCase__ : str =() if output_hidden_states else None lowerCamelCase__ : str =pixel_values for idx, layers in enumerate(zip(self.patch_embeddings, self.block ) ): lowerCamelCase__ , lowerCamelCase__ : Any =layers # Get patch embeddings from hidden_states lowerCamelCase__ : Tuple =embedding_layer(__lowercase ) # Send the embeddings through the blocks for _, blk in enumerate(__lowercase ): lowerCamelCase__ : List[str] =blk(__lowercase ) lowerCamelCase__ : Optional[Any] =layer_outputs[0] if output_hidden_states: lowerCamelCase__ : Union[str, Any] =all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__lowercase, hidden_states=__lowercase ) class __SCREAMING_SNAKE_CASE ( __A ): '''simple docstring''' _a = PoolFormerConfig _a = 'poolformer' _a = 'pixel_values' _a = True def snake_case ( self : str, lowerCamelCase : Optional[int] )-> Any: if isinstance(__lowercase, (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__lowercase, nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def snake_case ( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : List[Any]=False )-> Tuple: if isinstance(__lowercase, __lowercase ): lowerCamelCase__ : List[str] =value _lowercase : Any = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ _lowercase : List[Any] = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. 
Pixel values can be obtained using [`AutoImageProcessor`]. See [`PoolFormerImageProcessor.__call__`] for details. """ @add_start_docstrings( 'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , __A , ) class __SCREAMING_SNAKE_CASE ( __A ): '''simple docstring''' def __init__( self : str, lowerCamelCase : Tuple )-> Dict: super().__init__(__lowercase ) lowerCamelCase__ : List[Any] =config lowerCamelCase__ : List[str] =PoolFormerEncoder(__lowercase ) # Initialize weights and apply final processing self.post_init() def snake_case ( self : Any )-> List[Any]: return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(__lowercase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=__lowercase, config_class=_CONFIG_FOR_DOC, modality='''vision''', expected_output=_EXPECTED_OUTPUT_SHAPE, ) def snake_case ( self : Union[str, Any], lowerCamelCase : str = None, lowerCamelCase : Tuple = None, lowerCamelCase : Any = None, )-> List[str]: lowerCamelCase__ : Dict =( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCamelCase__ : Union[str, Any] =return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('''You have to specify pixel_values''' ) lowerCamelCase__ : List[Any] =self.encoder( __lowercase, output_hidden_states=__lowercase, return_dict=__lowercase, ) lowerCamelCase__ : Union[str, Any] =encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=__lowercase, hidden_states=encoder_outputs.hidden_states, ) class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : Any, lowerCamelCase : List[Any] )-> Union[str, Any]: super().__init__() lowerCamelCase__ : Any =nn.Linear(config.hidden_size, config.hidden_size ) def snake_case ( self : List[str], lowerCamelCase : Union[str, Any] )-> Dict: lowerCamelCase__ : int =self.dense(__lowercase ) return output @add_start_docstrings( '\n PoolFormer Model transformer with an image classification head on top\n ' , __A , ) class __SCREAMING_SNAKE_CASE ( __A ): '''simple docstring''' def __init__( self : Any, lowerCamelCase : Union[str, Any] )-> List[Any]: super().__init__(__lowercase ) lowerCamelCase__ : Optional[Any] =config.num_labels lowerCamelCase__ : str =PoolFormerModel(__lowercase ) # Final norm lowerCamelCase__ : List[Any] =PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head lowerCamelCase__ : str =( nn.Linear(config.hidden_sizes[-1], config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__lowercase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=__lowercase, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def snake_case ( self : List[str], lowerCamelCase : Union[str, Any] = None, lowerCamelCase : int = None, lowerCamelCase : Dict = None, lowerCamelCase : List[str] = None, )-> List[str]: lowerCamelCase__ : Any =return_dict if return_dict is not None else self.config.use_return_dict lowerCamelCase__ : Optional[Any] =self.poolformer( __lowercase, output_hidden_states=__lowercase, return_dict=__lowercase, ) lowerCamelCase__ : int =outputs[0] lowerCamelCase__ : Union[str, Any] =self.classifier(self.norm(__lowercase ).mean([-2, -1] ) ) lowerCamelCase__ : 
Optional[Any] =None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowerCamelCase__ : Any ='''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowerCamelCase__ : Tuple ='''single_label_classification''' else: lowerCamelCase__ : Optional[Any] ='''multi_label_classification''' if self.config.problem_type == "regression": lowerCamelCase__ : str =MSELoss() if self.num_labels == 1: lowerCamelCase__ : str =loss_fct(logits.squeeze(), labels.squeeze() ) else: lowerCamelCase__ : Any =loss_fct(__lowercase, __lowercase ) elif self.config.problem_type == "single_label_classification": lowerCamelCase__ : Optional[int] =CrossEntropyLoss() lowerCamelCase__ : Union[str, Any] =loss_fct(logits.view(-1, self.num_labels ), labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowerCamelCase__ : Tuple =BCEWithLogitsLoss() lowerCamelCase__ : Dict =loss_fct(__lowercase, __lowercase ) if not return_dict: lowerCamelCase__ : List[str] =(logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__lowercase, logits=__lowercase, hidden_states=outputs.hidden_states )
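A short usage sketch for the classification head above, using the sail/poolformer_s12 checkpoint named in the docstrings; the blank PIL image is a stand-in for a real photo, and the call downloads weights:

import torch
from PIL import Image
from transformers import AutoImageProcessor, PoolFormerForImageClassification

image = Image.new("RGB", (224, 224))  # stand-in for a real photo

processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")

with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])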
359
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) _lowercase : Any = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[Any] = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys _lowercase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
272
0
"""GPTNeoX Japanese model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
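A small sketch instantiating the restored config class with its defaults; that the model uses intermediate_multiple_size as a multiplier on hidden_size is an assumption about the companion modeling code, which is not shown here:

config = GPTNeoXJapaneseConfig()
print(config.hidden_size, config.intermediate_multiple_size)  # 2560 4
# Assumed MLP width: hidden_size * intermediate_multiple_size = 10240.
print(config.hidden_size * config.intermediate_multiple_size)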
31
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
31
1
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
351
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
266
0
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Greedy activity selection: assumes activities are sorted by finish time.

    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
65
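A worked trace of the greedy rule on the sample data, added here for illustration:

# Worked example: with start = [1, 3, 0, 5, 8, 5] and finish = [2, 4, 6, 7, 9, 9],
# the scan keeps an activity whenever it starts at or after the last selected finish:
#   keep 0 (finish 2) -> keep 1 (start 3 >= 2) -> skip 2 (start 0 < 4)
#   -> keep 3 (start 5 >= 4) -> keep 4 (start 8 >= 7) -> skip 5 (start 5 < 9)
print_max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9])  # prints: 0,1,3,4,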
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
65
1
import inspect
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch
import torch.utils.checkpoint

from ...models import UNet2DModel, VQModel
from ...schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
104
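A usage sketch for the pipeline above; the checkpoint id and image URL are assumptions taken from the usual public example, not part of this row:

from io import BytesIO

import requests
import torch
from PIL import Image

from diffusers import LDMSuperResolutionPipeline

pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
pipeline = pipeline.to("cuda" if torch.cuda.is_available() else "cpu")

# any small RGB image works; this URL is illustrative
url = "https://user-images.githubusercontent.com/38061659/199705896-b48e17b8-b231-47cd-a270-4ffa5a93fa3e.png"
low_res_img = Image.open(BytesIO(requests.get(url).content)).convert("RGB").resize((128, 128))

upscaled = pipeline(low_res_img, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("ldm_generated_image.png")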
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
104
1
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
18
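An illustrative use of the template above (my example, not from the source file), mapping dataset-specific column names onto the canonical ones:

qa_task = QuestionAnsweringExtractive(question_column="query", context_column="passage")
print(qa_task.column_mapping)
# -> {'query': 'question', 'passage': 'context', 'answers': 'answers'}
print(qa_task.task)  # -> 'question-answering-extractive'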
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer A_ = logging.get_logger(__name__) A_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} A_ = { '''vocab_file''': { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json''' ), }, } A_ = { '''yjernite/retribert-base-uncased''': 5_12, } A_ = { '''yjernite/retribert-base-uncased''': {'''do_lower_case''': True}, } class lowercase( __a ): '''simple docstring''' lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = PRETRAINED_INIT_CONFIGURATION lowercase__ = RetriBertTokenizer lowercase__ = ["input_ids", "attention_mask"] def __init__( self: int, a_: int=None, a_: Dict=None, a_: Any=True, a_: int="[UNK]", a_: Any="[SEP]", a_: List[Any]="[PAD]", a_: List[Any]="[CLS]", a_: str="[MASK]", a_: Dict=True, a_: Optional[int]=None, **a_: Tuple, ): '''simple docstring''' super().__init__( a_, tokenizer_file=a_, do_lower_case=a_, unk_token=a_, sep_token=a_, pad_token=a_, cls_token=a_, mask_token=a_, tokenize_chinese_chars=a_, strip_accents=a_, **a_, ) _snake_case : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""", a_ ) != do_lower_case or normalizer_state.get("""strip_accents""", a_ ) != strip_accents or normalizer_state.get("""handle_chinese_chars""", a_ ) != tokenize_chinese_chars ): _snake_case : Dict = getattr(a_, normalizer_state.pop("""type""" ) ) _snake_case : List[Any] = do_lower_case _snake_case : List[str] = strip_accents _snake_case : Tuple = tokenize_chinese_chars _snake_case : Tuple = normalizer_class(**a_ ) _snake_case : List[str] = do_lower_case def UpperCamelCase_ ( self: Any, a_: str, a_: Optional[int]=None ): '''simple docstring''' _snake_case : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self: List[str], a_: List[int], a_: Optional[List[int]] = None ): '''simple docstring''' _snake_case : Union[str, Any] = [self.sep_token_id] _snake_case : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self: Dict, a_: str, a_: Optional[str] = None ): '''simple docstring''' _snake_case : Union[str, Any] = self._tokenizer.model.save(a_, name=a_ ) return tuple(a_ )
64
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
360
'''simple docstring''' import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str: '''simple docstring''' with open(SCREAMING_SNAKE_CASE__ ) as metadata_file: snake_case : int = json.load(SCREAMING_SNAKE_CASE__ ) snake_case : Any = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE__ , **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path snake_case : Any = torch.load(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )['''module'''] # Load the entity vocab file snake_case : Dict = load_original_entity_vocab(SCREAMING_SNAKE_CASE__ ) # add an entry for [MASK2] snake_case : List[str] = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 snake_case : int = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks snake_case : Union[str, Any] = AddedToken('''<ent>''' , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) snake_case : Optional[int] = AddedToken('''<ent2>''' , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ ) with open(os.path.join(SCREAMING_SNAKE_CASE__ , '''tokenizer_config.json''' ) , '''r''' ) as f: snake_case : Tuple = json.load(SCREAMING_SNAKE_CASE__ ) snake_case : List[str] = '''MLukeTokenizer''' with open(os.path.join(SCREAMING_SNAKE_CASE__ , '''tokenizer_config.json''' ) , '''w''' ) as f: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) with open(os.path.join(SCREAMING_SNAKE_CASE__ , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case : List[Any] = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) # Initialize the embeddings of the special tokens snake_case : List[str] = tokenizer.convert_tokens_to_ids(['''@'''] )[0] snake_case : List[str] = tokenizer.convert_tokens_to_ids(['''#'''] )[0] snake_case : List[str] = state_dict['''embeddings.word_embeddings.weight'''] snake_case : int = word_emb[ent_init_index].unsqueeze(0 ) snake_case : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 ) snake_case : Dict = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: snake_case : Dict = state_dict[bias_name] snake_case : Any = decoder_bias[ent_init_index].unsqueeze(0 ) snake_case : str = decoder_bias[enta_init_index].unsqueeze(0 ) snake_case : Any = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: snake_case : Optional[Any] = F'encoder.layer.{layer_index}.attention.self.' 
snake_case : int = state_dict[prefix + matrix_name] snake_case : Union[str, Any] = state_dict[prefix + matrix_name] snake_case : int = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks snake_case : List[Any] = state_dict['''entity_embeddings.entity_embeddings.weight'''] snake_case : Dict = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 ) snake_case : List[Any] = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' snake_case : Optional[Any] = state_dict['''entity_predictions.bias'''] snake_case : Optional[int] = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 ) snake_case : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] ) snake_case : str = LukeForMaskedLM(config=SCREAMING_SNAKE_CASE__ ).eval() state_dict.pop('''entity_predictions.decoder.weight''' ) state_dict.pop('''lm_head.decoder.weight''' ) state_dict.pop('''lm_head.decoder.bias''' ) snake_case : Optional[Any] = OrderedDict() for key, value in state_dict.items(): if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )): snake_case : int = state_dict[key] else: snake_case : List[str] = state_dict[key] snake_case ,snake_case : int = model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ ) if set(SCREAMING_SNAKE_CASE__ ) != {"luke.embeddings.position_ids"}: raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' ) if set(SCREAMING_SNAKE_CASE__ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(F'Unexpected missing_keys: {missing_keys}' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs snake_case : Optional[int] = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , task='''entity_classification''' ) snake_case : Tuple = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).''' snake_case : int = (0, 9) snake_case : str = tokenizer(SCREAMING_SNAKE_CASE__ , entity_spans=[span] , return_tensors='''pt''' ) snake_case : Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base snake_case : Dict = torch.Size((1, 33, 768) ) snake_case : int = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base snake_case : str = torch.Size((1, 1, 768) ) snake_case : Tuple = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is' F' {expected_shape}' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction snake_case : str = 
MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) snake_case : List[Any] = '''Tokyo is the capital of <mask>.''' snake_case : Union[str, Any] = (24, 30) snake_case : Tuple = tokenizer(SCREAMING_SNAKE_CASE__ , entity_spans=[span] , return_tensors='''pt''' ) snake_case : int = model(**SCREAMING_SNAKE_CASE__ ) snake_case : List[str] = encoding['''input_ids'''][0].tolist() snake_case : Union[str, Any] = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) ) snake_case : Dict = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(SCREAMING_SNAKE_CASE__ ) snake_case : List[Any] = outputs.entity_logits[0][0].argmax().item() snake_case : Dict = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(SCREAMING_SNAKE_CASE__ ) ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ) -> List[str]: '''simple docstring''' snake_case : Dict = ['''[MASK]''', '''[PAD]''', '''[UNK]'''] snake_case : List[Any] = [json.loads(SCREAMING_SNAKE_CASE__ ) for line in open(SCREAMING_SNAKE_CASE__ )] snake_case : Optional[int] = {} for entry in data: snake_case : Optional[Any] = entry['''id'''] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: snake_case : List[str] = entity_id break snake_case : Any = F'{language}:{entity_name}' snake_case : List[str] = entity_id return new_mapping if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) lowercase__ = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
83
0
def longest_common_subsequence(x: str, y: str):
    """
    Dynamic-programming solution; returns the LCS length and one such subsequence.

    >>> longest_common_subsequence("AGGTAB", "GXTXAYB")
    (4, 'GTAB')
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # backtrack through the table to recover one optimal subsequence
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
162
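One more illustrative call (my example, not from the source file), using the classic CLRS pair, whose LCS has length 4:

length, seq = longest_common_subsequence("ABCBDAB", "BDCABA")
print(length, seq)  # length is 4; seq is one of the optimal subsequences, e.g. "BCAB"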
from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput snake_case_ : List[str] = 8 def A (__A : Union[str, Any] , __A : List[Any]=BITS ) -> Tuple: """simple docstring""" UpperCAmelCase_ = x.device UpperCAmelCase_ = (x * 255).int().clamp(0 , 255 ) UpperCAmelCase_ = 2 ** torch.arange(bits - 1 , -1 , -1 , device=__A ) UpperCAmelCase_ = rearrange(__A , '''d -> d 1 1''' ) UpperCAmelCase_ = rearrange(__A , '''b c h w -> b c 1 h w''' ) UpperCAmelCase_ = ((x & mask) != 0).float() UpperCAmelCase_ = rearrange(__A , '''b c d h w -> b (c d) h w''' ) UpperCAmelCase_ = bits * 2 - 1 return bits def A (__A : Dict , __A : Tuple=BITS ) -> List[str]: """simple docstring""" UpperCAmelCase_ = x.device UpperCAmelCase_ = (x > 0).int() UpperCAmelCase_ = 2 ** torch.arange(bits - 1 , -1 , -1 , device=__A , dtype=torch.intaa ) UpperCAmelCase_ = rearrange(__A , '''d -> d 1 1''' ) UpperCAmelCase_ = rearrange(__A , '''b (c d) h w -> b c d h w''' , d=8 ) UpperCAmelCase_ = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' ) return (dec / 255).clamp(0.0 , 1.0 ) def A (self : List[Any] , __A : torch.FloatTensor , __A : int , __A : torch.FloatTensor , __A : float = 0.0 , __A : bool = True , __A : Tuple=None , __A : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]: """simple docstring""" if self.num_inference_steps is None: raise ValueError( '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) UpperCAmelCase_ = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas UpperCAmelCase_ = self.alphas_cumprod[timestep] UpperCAmelCase_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod UpperCAmelCase_ = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf UpperCAmelCase_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" UpperCAmelCase_ = self.bit_scale if self.config.clip_sample: UpperCAmelCase_ = torch.clamp(__A , -scale , __A ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) UpperCAmelCase_ = self._get_variance(__A , __A ) UpperCAmelCase_ = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide UpperCAmelCase_ = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf UpperCAmelCase_ = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf UpperCAmelCase_ = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 UpperCAmelCase_ = model_output.device if torch.is_tensor(__A ) else '''cpu''' UpperCAmelCase_ = torch.randn(model_output.shape , dtype=model_output.dtype , generator=__A ).to(__A ) UpperCAmelCase_ = self._get_variance(__A , __A ) ** 0.5 * eta * noise UpperCAmelCase_ = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=__A , pred_original_sample=__A ) def A (self : Optional[int] , __A : torch.FloatTensor , __A : int , __A : torch.FloatTensor , __A : int="epsilon" , __A : Optional[Any]=None , __A : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]: """simple docstring""" UpperCAmelCase_ = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: UpperCAmelCase_ , UpperCAmelCase_ = torch.split(__A , sample.shape[1] , dim=1 ) else: UpperCAmelCase_ = None # 1. compute alphas, betas UpperCAmelCase_ = self.alphas_cumprod[t] UpperCAmelCase_ = self.alphas_cumprod[t - 1] if t > 0 else self.one UpperCAmelCase_ = 1 - alpha_prod_t UpperCAmelCase_ = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": UpperCAmelCase_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": UpperCAmelCase_ = model_output else: raise ValueError(F"""Unsupported prediction_type {prediction_type}.""" ) # 3. Clip "predicted x_0" UpperCAmelCase_ = self.bit_scale if self.config.clip_sample: UpperCAmelCase_ = torch.clamp(__A , -scale , __A ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase_ = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t UpperCAmelCase_ = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise UpperCAmelCase_ = 0 if t > 0: UpperCAmelCase_ = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=__A ).to(model_output.device ) UpperCAmelCase_ = (self._get_variance(__A , predicted_variance=__A ) ** 0.5) * noise UpperCAmelCase_ = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=__A , pred_original_sample=__A ) class __snake_case ( a ): def __init__( self : Union[str, Any] , _snake_case : UNetaDConditionModel , _snake_case : Union[DDIMScheduler, DDPMScheduler] , _snake_case : Optional[float] = 1.0 , ): """simple docstring""" super().__init__() UpperCAmelCase_ = bit_scale UpperCAmelCase_ = ( ddim_bit_scheduler_step if isinstance(_snake_case , _snake_case) else ddpm_bit_scheduler_step ) self.register_modules(unet=_snake_case , scheduler=_snake_case) @torch.no_grad() def __call__( self : Union[str, Any] , _snake_case : Optional[int] = 256 , _snake_case : Optional[int] = 256 , _snake_case : Optional[int] = 50 , _snake_case : Optional[torch.Generator] = None , _snake_case : Optional[int] = 1 , _snake_case : Optional[str] = "pil" , _snake_case : bool = True , **_snake_case : Optional[Any] , ): """simple docstring""" UpperCAmelCase_ = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=_snake_case , ) UpperCAmelCase_ = decimal_to_bits(_snake_case) * self.bit_scale UpperCAmelCase_ = latents.to(self.device) self.scheduler.set_timesteps(_snake_case) for t in self.progress_bar(self.scheduler.timesteps): # predict the noise residual UpperCAmelCase_ = self.unet(_snake_case , _snake_case).sample # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ = self.scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample UpperCAmelCase_ = bits_to_decimal(_snake_case) if output_type == "pil": UpperCAmelCase_ = self.numpy_to_pil(_snake_case) if not return_dict: return (image,) return ImagePipelineOutput(images=_snake_case)
51
0
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, psi: int, gamma: int) -> np.ndarray:
    """Build a single Gabor kernel of (odd) size `ksize` for orientation `theta` (degrees)."""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)

    waitKey(0)
366
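A quick illustrative check of the kernel builder above (my example, not from the source file):

kernel = gabor_filter_kernel(10, 8, theta=45, lambd=10, psi=0, gamma=0)
print(kernel.shape)  # (11, 11): an even ksize is bumped up to the next odd size
print(kernel.dtype)  # float32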
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find the root of `function` closest to `starting_point` using Newton-Raphson."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")

    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")

    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )

    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )

    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
92
0
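An additional illustrative call (my example, not in the source row): the positive root of x**2 - 2 = 0, i.e. sqrt(2), starting the iteration near the root:

print(newton_raphson("x**2 - 2", 1.5))  # ~1.4142135623730951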
import argparse
import re

from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints

from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging


logging.set_verbosity_info()


# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}


def rename_keys(s_dict):
    # 1. rename flat `layers_{x}` names into the HF `block/{x}/layer` layout
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: the obfuscated row collapsed the nested
    # f-string into the literal 'nested fstring'; the per-expert key is restored here
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weihts = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weihts[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict


GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}


def convert_gin_to_config(gin_file, num_experts):
    # Convert a google-style gin config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
104
"""simple docstring""" from typing import List import numpy as np def lowercase ( a__ : dict ) -> int: _UpperCamelCase = {key: len(a__ ) for key, value in gen_kwargs.items() if isinstance(a__ , a__ )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) _UpperCamelCase = max(lists_lengths.values() , default=0 ) return max(1 , a__ ) def lowercase ( a__ : int , a__ : int ) -> List[range]: _UpperCamelCase = [] for group_idx in range(a__ ): _UpperCamelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break _UpperCamelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 _UpperCamelCase = range(a__ , start + num_shards_to_add ) shards_indices_per_group.append(a__ ) return shards_indices_per_group def lowercase ( a__ : dict , a__ : int ) -> List[dict]: _UpperCamelCase = _number_of_shards_in_gen_kwargs(a__ ) if num_shards == 1: return [dict(a__ )] else: _UpperCamelCase = _distribute_shards(num_shards=a__ , max_num_jobs=a__ ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(a__ , a__ ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(a__ ) ) ] def lowercase ( a__ : List[dict] ) -> dict: return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , a__ ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def lowercase ( a__ : np.random.Generator , a__ : dict ) -> dict: _UpperCamelCase = {len(a__ ) for value in gen_kwargs.values() if isinstance(a__ , a__ )} _UpperCamelCase = {} for size in list_sizes: _UpperCamelCase = list(range(a__ ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes _UpperCamelCase = dict(a__ ) for key, value in shuffled_kwargs.items(): if isinstance(a__ , a__ ): _UpperCamelCase = [value[i] for i in indices_per_size[len(a__ )]] return shuffled_kwargs
256
0
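An illustrative check of the shard-distribution helper above (my example, not from the source file):

# 10 shards split across at most 3 jobs -> group sizes 4, 3, 3
print(_distribute_shards(num_shards=10, max_num_jobs=3))
# -> [range(0, 4), range(4, 7), range(7, 10)]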
'''simple docstring''' import argparse import collections import json import os import re import string import sys import numpy as np UpperCAmelCase = re.compile(r'\b(a|an|the)\b', re.UNICODE) UpperCAmelCase = None def _snake_case ( ) -> int: """simple docstring""" lowerCAmelCase = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" ) parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" ) parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" ) parser.add_argument( """--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" ) parser.add_argument( """--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" ) parser.add_argument( """--na-prob-thresh""" , """-t""" , type=_SCREAMING_SNAKE_CASE , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , ) parser.add_argument( """--out-image-dir""" , """-p""" , metavar="""out_images""" , default=_SCREAMING_SNAKE_CASE , help="""Save precision-recall curves to directory.""" ) parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> int: """simple docstring""" lowerCAmelCase = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: lowerCAmelCase = bool(qa["""answers"""]["""text"""] ) return qid_to_has_ans def _snake_case ( _SCREAMING_SNAKE_CASE : Dict ) -> Dict: """simple docstring""" def remove_articles(_SCREAMING_SNAKE_CASE : int ): return ARTICLES_REGEX.sub(""" """ , _SCREAMING_SNAKE_CASE ) def white_space_fix(_SCREAMING_SNAKE_CASE : Any ): return " ".join(text.split() ) def remove_punc(_SCREAMING_SNAKE_CASE : Union[str, Any] ): lowerCAmelCase = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_SCREAMING_SNAKE_CASE : Tuple ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_SCREAMING_SNAKE_CASE ) ) ) ) def _snake_case ( _SCREAMING_SNAKE_CASE : int ) -> List[Any]: """simple docstring""" if not s: return [] return normalize_answer(_SCREAMING_SNAKE_CASE ).split() def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]: """simple docstring""" return int(normalize_answer(_SCREAMING_SNAKE_CASE ) == normalize_answer(_SCREAMING_SNAKE_CASE ) ) def _snake_case ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int ) -> Dict: """simple docstring""" lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = collections.Counter(_SCREAMING_SNAKE_CASE ) & collections.Counter(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = sum(common.values() ) if len(_SCREAMING_SNAKE_CASE ) == 0 or len(_SCREAMING_SNAKE_CASE ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 lowerCAmelCase = 1.0 * num_same / len(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = 1.0 * num_same / len(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = (2 * precision * recall) / (precision + recall) return fa def _snake_case ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict: """simple docstring""" lowerCAmelCase = {} lowerCAmelCase = {} for article in dataset: 
for p in article["paragraphs"]: for qa in p["qas"]: lowerCAmelCase = qa["""id"""] lowerCAmelCase = [t for t in qa["""answers"""]["""text"""] if normalize_answer(_SCREAMING_SNAKE_CASE )] if not gold_answers: # For unanswerable questions, only correct answer is empty string lowerCAmelCase = [""""""] if qid not in preds: print(f'Missing prediction for {qid}' ) continue lowerCAmelCase = preds[qid] # Take max over all gold answers lowerCAmelCase = max(compute_exact(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for a in gold_answers ) lowerCAmelCase = max(compute_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for a in gold_answers ) return exact_scores, fa_scores def _snake_case ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ) -> Tuple: """simple docstring""" lowerCAmelCase = {} for qid, s in scores.items(): lowerCAmelCase = na_probs[qid] > na_prob_thresh if pred_na: lowerCAmelCase = float(not qid_to_has_ans[qid] ) else: lowerCAmelCase = s return new_scores def _snake_case ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any=None ) -> List[Any]: """simple docstring""" if not qid_list: lowerCAmelCase = len(_SCREAMING_SNAKE_CASE ) return collections.OrderedDict( [ ("""exact""", 100.0 * sum(exact_scores.values() ) / total), ("""f1""", 100.0 * sum(fa_scores.values() ) / total), ("""total""", total), ] ) else: lowerCAmelCase = len(_SCREAMING_SNAKE_CASE ) return collections.OrderedDict( [ ("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ("""total""", total), ] ) def _snake_case ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]: """simple docstring""" for k in new_eval: lowerCAmelCase = new_eval[k] def _snake_case ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any ) -> Optional[int]: """simple docstring""" plt.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , color="""b""" , alpha=0.2 , where="""post""" ) plt.fill_between(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , step="""post""" , alpha=0.2 , color="""b""" ) plt.xlabel("""Recall""" ) plt.ylabel("""Precision""" ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(_SCREAMING_SNAKE_CASE ) plt.savefig(_SCREAMING_SNAKE_CASE ) plt.clf() def _snake_case ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : List[Any]=None ) -> int: """simple docstring""" lowerCAmelCase = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : na_probs[k] ) lowerCAmelCase = 0.0 lowerCAmelCase = 1.0 lowerCAmelCase = 0.0 lowerCAmelCase = [1.0] lowerCAmelCase = [0.0] lowerCAmelCase = 0.0 for i, qid in enumerate(_SCREAMING_SNAKE_CASE ): if qid_to_has_ans[qid]: true_pos += scores[qid] lowerCAmelCase = true_pos / float(i + 1 ) lowerCAmelCase = true_pos / float(_SCREAMING_SNAKE_CASE ) if i == len(_SCREAMING_SNAKE_CASE ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(_SCREAMING_SNAKE_CASE ) recalls.append(_SCREAMING_SNAKE_CASE ) if out_image: plot_pr_curve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ) return {"ap": 100.0 * avg_prec} def _snake_case ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] ) -> Optional[Any]: """simple docstring""" if out_image_dir and not os.path.exists(_SCREAMING_SNAKE_CASE ): os.makedirs(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return lowerCAmelCase = make_precision_recall_eval( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , ) lowerCAmelCase = make_precision_recall_eval( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , ) lowerCAmelCase = {k: float(_SCREAMING_SNAKE_CASE ) for k, v in qid_to_has_ans.items()} lowerCAmelCase = make_precision_recall_eval( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , ) merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """pr_exact""" ) merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """pr_f1""" ) merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """pr_oracle""" ) def _snake_case ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str] ) -> str: """simple docstring""" if not qid_list: return lowerCAmelCase = [na_probs[k] for k in qid_list] lowerCAmelCase = np.ones_like(_SCREAMING_SNAKE_CASE ) / float(len(_SCREAMING_SNAKE_CASE ) ) plt.hist(_SCREAMING_SNAKE_CASE , weights=_SCREAMING_SNAKE_CASE , bins=20 , range=(0.0, 1.0) ) plt.xlabel("""Model probability of no-answer""" ) plt.ylabel("""Proportion of dataset""" ) plt.title(f'Histogram of no-answer probability: {name}' ) plt.savefig(os.path.join(_SCREAMING_SNAKE_CASE , f'na_prob_hist_{name}.png' ) ) plt.clf() def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[int]: """simple docstring""" lowerCAmelCase = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) lowerCAmelCase = num_no_ans lowerCAmelCase = cur_score lowerCAmelCase = 0.0 lowerCAmelCase = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : na_probs[k] ) for i, qid in enumerate(_SCREAMING_SNAKE_CASE ): if qid not in scores: continue if qid_to_has_ans[qid]: lowerCAmelCase = scores[qid] else: if preds[qid]: lowerCAmelCase = -1 else: lowerCAmelCase = 0 cur_score += diff if cur_score > best_score: lowerCAmelCase = cur_score lowerCAmelCase = na_probs[qid] return 100.0 * best_score / len(_SCREAMING_SNAKE_CASE ), best_thresh def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]: """simple docstring""" lowerCAmelCase, lowerCAmelCase = find_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase, lowerCAmelCase = find_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase = best_exact lowerCAmelCase = exact_thresh lowerCAmelCase = best_fa lowerCAmelCase = fa_thresh def _snake_case ( ) -> List[str]: """simple docstring""" with open(OPTS.data_file ) as f: lowerCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = dataset_json["""data"""] with open(OPTS.pred_file ) as f: lowerCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: lowerCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) else: lowerCAmelCase = {k: 0.0 for k in preds} lowerCAmelCase = make_qid_to_has_ans(_SCREAMING_SNAKE_CASE ) # maps qid to True/False lowerCAmelCase = [k for k, v in qid_to_has_ans.items() if v] lowerCAmelCase = [k for k, v in qid_to_has_ans.items() if not v] lowerCAmelCase, lowerCAmelCase = get_raw_scores(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase = apply_no_ans_threshold(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.na_prob_thresh ) lowerCAmelCase = apply_no_ans_threshold(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.na_prob_thresh ) lowerCAmelCase = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if has_ans_qids: lowerCAmelCase = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , qid_list=_SCREAMING_SNAKE_CASE ) merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """HasAns""" ) if no_ans_qids: lowerCAmelCase = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , qid_list=_SCREAMING_SNAKE_CASE ) merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """NoAns""" ) if OPTS.na_prob_file: find_all_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir ) histogram_na_prob(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir , """hasAns""" ) histogram_na_prob(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir , """noAns""" ) if OPTS.out_file: with open(OPTS.out_file , """w""" ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: print(json.dumps(_SCREAMING_SNAKE_CASE , indent=2 ) ) if __name__ == "__main__": UpperCAmelCase = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt main()
187
class TrieNode:
    def __init__(self) -> None:
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
187
1
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_=False ): """simple docstring""" try: snake_case = os.environ[key] except KeyError: # KEY isn't set, default to `default`. snake_case = default else: # KEY is set, convert it to True or False. try: snake_case = strtobool(_lowerCAmelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F'''If set, {key} must be yes or no.''' ) return _value _SCREAMING_SNAKE_CASE : Optional[int] = parse_flag_from_env("RUN_SLOW", default=False) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skip('''Test was skipped''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skipUnless(_run_slow_tests ,'''test is slow''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skipUnless(not torch.cuda.is_available() ,'''test requires only a CPU''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skipUnless(torch.cuda.is_available() ,'''test requires a GPU''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skipUnless(is_xpu_available() ,'''test requires a XPU''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skipUnless(is_mps_available() ,'''test requires a `mps` backend support in `torch`''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skipUnless( is_transformers_available() and is_datasets_available() ,'''test requires the Hugging Face suite''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skipUnless(is_bnb_available() ,'''test requires the bitsandbytes library''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skipUnless(is_tpu_available() ,'''test requires TPU''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skipUnless(torch.cuda.device_count() == 1 ,'''test requires a GPU''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skipUnless(torch.xpu.device_count() == 1 ,'''test requires a XPU''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skipUnless(torch.cuda.device_count() > 1 ,'''test requires multiple GPUs''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skipUnless(torch.xpu.device_count() > 1 ,'''test requires multiple XPUs''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skipUnless(is_safetensors_available() ,'''test requires safetensors''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple 
docstring""" return unittest.skipUnless(is_deepspeed_available() ,'''test requires DeepSpeed''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skipUnless(is_torch_version('''>=''' ,'''1.12.0''' ) ,'''test requires torch version >= 1.12.0''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_=None ,UpperCamelCase_=None ): """simple docstring""" if test_case is None: return partial(_lowerCAmelCase ,version=_lowerCAmelCase ) return unittest.skipUnless(is_torch_version('''>=''' ,_lowerCAmelCase ) ,F'''test requires torch version >= {version}''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skipUnless(is_tensorboard_available() ,'''test requires Tensorboard''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skipUnless(is_wandb_available() ,'''test requires wandb''' )(_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skipUnless(is_comet_ml_available() ,'''test requires comet_ml''' )(_lowerCAmelCase ) _SCREAMING_SNAKE_CASE : Any = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" return unittest.skipUnless( _atleast_one_tracker_available ,'''test requires at least one tracker to be available and for `comet_ml` to not be installed''' ,)(_lowerCAmelCase ) class A__ ( unittest.TestCase ): """simple docstring""" __magic_name__ = True @classmethod def a_ ( cls ): snake_case = tempfile.mkdtemp() @classmethod def a_ ( cls ): if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def a_ ( self ): if self.clear_on_setup: for path in Path(self.tmpdir ).glob('''**/*''' ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(snake_case__ ) class A__ ( unittest.TestCase ): """simple docstring""" def a_ ( self ): super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class A__ ( unittest.TestCase ): """simple docstring""" def a_ ( self , __snake_case ): snake_case = mocks if isinstance(snake_case__ , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" snake_case = AcceleratorState() snake_case = tensor[None].clone().to(state.device ) snake_case = gather(_lowerCAmelCase ).cpu() snake_case = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] ,_lowerCAmelCase ): return False return True class A__ : """simple docstring""" def __init__( self , __snake_case , __snake_case , __snake_case ): snake_case = returncode snake_case = stdout snake_case = stderr async def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ): """simple docstring""" while True: snake_case = await stream.readline() if line: callback(_lowerCAmelCase ) else: break async def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_=None ,UpperCamelCase_=None ,UpperCamelCase_=None ,UpperCamelCase_=False ,UpperCamelCase_=False ): """simple docstring""" if echo: print('''\nRunning: ''' ,''' '''.join(_lowerCAmelCase ) ) snake_case = await asyncio.create_subprocess_exec( cmd[0] ,*cmd[1:] ,stdin=_lowerCAmelCase ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=_lowerCAmelCase ,) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) snake_case = [] snake_case = [] def tee(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_="" ): snake_case = line.decode('''utf-8''' ).rstrip() sink.append(_lowerCAmelCase ) if not quiet: print(_lowerCAmelCase ,_lowerCAmelCase ,file=_lowerCAmelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout ,lambda UpperCamelCase_ : tee(_lowerCAmelCase ,_lowerCAmelCase ,sys.stdout ,label='''stdout:''' ) ) ), asyncio.create_task(_read_stream(p.stderr ,lambda UpperCamelCase_ : tee(_lowerCAmelCase ,_lowerCAmelCase ,sys.stderr ,label='''stderr:''' ) ) ), ] ,timeout=_lowerCAmelCase ,) return _RunOutput(await p.wait() ,_lowerCAmelCase ,_lowerCAmelCase ) def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_=None ,UpperCamelCase_=None ,UpperCamelCase_=1_80 ,UpperCamelCase_=False ,UpperCamelCase_=True ): """simple docstring""" snake_case = asyncio.get_event_loop() snake_case = loop.run_until_complete( _stream_subprocess(_lowerCAmelCase ,env=_lowerCAmelCase ,stdin=_lowerCAmelCase ,timeout=_lowerCAmelCase ,quiet=_lowerCAmelCase ,echo=_lowerCAmelCase ) ) snake_case = ''' '''.join(_lowerCAmelCase ) if result.returncode > 0: snake_case = '''\n'''.join(result.stderr ) raise RuntimeError( F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' F'''The combined stderr from workers follows:\n{stderr}''' ) return result class A__ ( a_ ): """simple docstring""" pass def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_=False ): """simple docstring""" try: snake_case = subprocess.check_output(_lowerCAmelCase ,stderr=subprocess.STDOUT ) if return_stdout: if hasattr(_lowerCAmelCase ,'''decode''' ): snake_case = output.decode('''utf-8''' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F'''Command `{" ".join(_lowerCAmelCase )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
127
from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) UpperCAmelCase : Optional[int] =logging.get_logger(__name__) # pylint: disable=invalid-name UpperCAmelCase : Any =""" Examples: ```py >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline >>> from diffusers.utils import load_image >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16 ... ) >>> pipe_prior.to(\"cuda\") >>> prompt = \"A red cartoon frog, 4k\" >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16 ... ) >>> pipe.to(\"cuda\") >>> init_image = load_image( ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\" ... \"/kandinsky/frog.png\" ... ) >>> image = pipe( ... image=init_image, ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... strength=0.2, ... ).images >>> image[0].save(\"red_frog.png\") ``` """ def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=8): UpperCamelCase_ = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCamelCase_ = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase=5_12 , _lowerCAmelCase=5_12): UpperCamelCase_ = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1) UpperCamelCase_ = np.array(pil_image.convert("RGB")) UpperCamelCase_ = arr.astype(np.floataa) / 127.5 - 1 UpperCamelCase_ = np.transpose(_lowerCAmelCase , [2, 0, 1]) UpperCamelCase_ = torch.from_numpy(_lowerCAmelCase).unsqueeze(0) return image class _lowercase (a_ ): '''simple docstring''' def __init__( self , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' super().__init__() self.register_modules( unet=snake_case__ , scheduler=snake_case__ , movq=snake_case__ , ) UpperCamelCase_ = 2 ** (len(self.movq.config.block_out_channels ) - 1) def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' UpperCamelCase_ = min(int(num_inference_steps * strength ) , snake_case__ ) UpperCamelCase_ = max(num_inference_steps - init_timestep , 0 ) UpperCamelCase_ = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ): '''simple docstring''' if not isinstance(snake_case__ , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case__ )}""" ) UpperCamelCase_ = image.to(device=snake_case__ , dtype=snake_case__ ) UpperCamelCase_ = batch_size * num_images_per_prompt if image.shape[1] == 4: UpperCamelCase_ = image else: if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size: 
raise ValueError( F"""You have passed a list of generators of length {len(snake_case__ )}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) elif isinstance(snake_case__ , snake_case__ ): UpperCamelCase_ = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case__ ) ] UpperCamelCase_ = torch.cat(snake_case__ , dim=0 ) else: UpperCamelCase_ = self.movq.encode(snake_case__ ).latent_dist.sample(snake_case__ ) UpperCamelCase_ = self.movq.config.scaling_factor * init_latents UpperCamelCase_ = torch.cat([init_latents] , dim=0 ) UpperCamelCase_ = init_latents.shape UpperCamelCase_ = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ ) # get latents UpperCamelCase_ = self.scheduler.add_noise(snake_case__ , snake_case__ , snake_case__ ) UpperCamelCase_ = init_latents return latents def _lowerCamelCase ( self , snake_case__=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) UpperCamelCase_ = torch.device(F"""cuda:{gpu_id}""" ) UpperCamelCase_ = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(snake_case__ , snake_case__ ) def _lowerCamelCase ( self , snake_case__=0 ): '''simple docstring''' if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." ) UpperCamelCase_ = torch.device(F"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=snake_case__ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCamelCase_ = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCamelCase_ , UpperCamelCase_ = cpu_offload_with_hook(snake_case__ , snake_case__ , prev_module_hook=snake_case__ ) # We'll offload the last model manually. 
UpperCamelCase_ = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _lowerCamelCase ( self ): '''simple docstring''' if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(snake_case__ , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(snake_case__ ) def __call__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 512 , snake_case__ = 512 , snake_case__ = 100 , snake_case__ = 4.0 , snake_case__ = 0.3 , snake_case__ = 1 , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , ): '''simple docstring''' UpperCamelCase_ = self._execution_device UpperCamelCase_ = guidance_scale > 1.0 if isinstance(snake_case__ , snake_case__ ): UpperCamelCase_ = torch.cat(snake_case__ , dim=0 ) UpperCamelCase_ = image_embeds.shape[0] if isinstance(snake_case__ , snake_case__ ): UpperCamelCase_ = torch.cat(snake_case__ , dim=0 ) if do_classifier_free_guidance: UpperCamelCase_ = image_embeds.repeat_interleave(snake_case__ , dim=0 ) UpperCamelCase_ = negative_image_embeds.repeat_interleave(snake_case__ , dim=0 ) UpperCamelCase_ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ ) if not isinstance(snake_case__ , snake_case__ ): UpperCamelCase_ = [image] if not all(isinstance(snake_case__ , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( F"""Input is in incorrect format: {[type(snake_case__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" ) UpperCamelCase_ = torch.cat([prepare_image(snake_case__ , snake_case__ , snake_case__ ) for i in image] , dim=0 ) UpperCamelCase_ = image.to(dtype=image_embeds.dtype , device=snake_case__ ) UpperCamelCase_ = self.movq.encode(snake_case__ )["latents"] UpperCamelCase_ = latents.repeat_interleave(snake_case__ , dim=0 ) self.scheduler.set_timesteps(snake_case__ , device=snake_case__ ) UpperCamelCase_ , UpperCamelCase_ = self.get_timesteps(snake_case__ , snake_case__ , snake_case__ ) UpperCamelCase_ = timesteps[:1].repeat(batch_size * num_images_per_prompt ) UpperCamelCase_ , UpperCamelCase_ = downscale_height_and_width(snake_case__ , snake_case__ , self.movq_scale_factor ) UpperCamelCase_ = self.prepare_latents( snake_case__ , snake_case__ , snake_case__ , snake_case__ , image_embeds.dtype , snake_case__ , snake_case__ ) for i, t in enumerate(self.progress_bar(snake_case__ ) ): # expand the latents if we are doing classifier free guidance UpperCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCamelCase_ = {"image_embeds": image_embeds} UpperCamelCase_ = self.unet( sample=snake_case__ , timestep=snake_case__ , encoder_hidden_states=snake_case__ , added_cond_kwargs=snake_case__ , return_dict=snake_case__ , )[0] if do_classifier_free_guidance: UpperCamelCase_ , UpperCamelCase_ = noise_pred.split(latents.shape[1] , dim=1 ) UpperCamelCase_ , UpperCamelCase_ = noise_pred.chunk(2 ) UpperCamelCase_ , UpperCamelCase_ = variance_pred.chunk(2 ) UpperCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCamelCase_ = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type 
in ["learned", "learned_range"] ): UpperCamelCase_ , UpperCamelCase_ = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase_ = self.scheduler.step( snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ , )[0] # post-processing UpperCamelCase_ = self.movq.decode(snake_case__ , force_not_quantize=snake_case__ )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: UpperCamelCase_ = image * 0.5 + 0.5 UpperCamelCase_ = image.clamp(0 , 1 ) UpperCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCamelCase_ = self.numpy_to_pil(snake_case__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case__ )
128
0
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check the substring divisibility properties of Project Euler problem 43."""
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0 to 9 pandigital numbers with the substring divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
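# Added sanity check (an illustration, not part of the original solution):
# 1406357289 is the worked example of substring divisibility given in the
# Project Euler 43 statement, so its digit tuple should pass the predicate.
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))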
61
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 0 (logical NOR)."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
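# Added check (an illustration, not part of the original file): NOR is the
# negation of OR, so the gate must agree with int(not (a or b)) across the
# whole truth table.
assert all(nor_gate(a, b) == int(not (a or b)) for a in (0, 1) for b in (0, 1))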
61
1
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = IFInpaintingPipeline UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''} def lowerCamelCase ( self :List[str] ): return self._get_dummy_components() def lowerCamelCase ( self :Any , __UpperCamelCase :List[str] , __UpperCamelCase :Dict=0 ): if str(__UpperCamelCase ).startswith("mps" ): A = torch.manual_seed(__UpperCamelCase ) else: A = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) A = { "prompt": "A painting of a squirrel eating a burger", "image": image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def lowerCamelCase ( self :Optional[int] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def lowerCamelCase ( self :Optional[Any] ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def lowerCamelCase ( self :Any ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def lowerCamelCase ( self :Any ): self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def lowerCamelCase ( self :Tuple ): self._test_save_load_local() def lowerCamelCase ( self :str ): self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
292
"""simple docstring""" # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def A__ ( UpperCamelCase ): A = [False] * len(UpperCamelCase ) A = [-1] * len(UpperCamelCase ) def dfs(UpperCamelCase , UpperCamelCase ): A = True A = c for u in graph[v]: if not visited[u]: dfs(UpperCamelCase , 1 - c ) for i in range(len(UpperCamelCase ) ): if not visited[i]: dfs(UpperCamelCase , 0 ) for i in range(len(UpperCamelCase ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph _snake_case : str = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
292
1
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=lowercase ) _UpperCAmelCase = parser.add_subparsers(help="""accelerate command helpers""" ) # Register commands get_config_parser(subparsers=lowercase ) env_command_parser(subparsers=lowercase ) launch_command_parser(subparsers=lowercase ) tpu_command_parser(subparsers=lowercase ) test_command_parser(subparsers=lowercase ) # Let's go _UpperCAmelCase = parser.parse_args() if not hasattr(lowercase ,"""func""" ): parser.print_help() exit(1 ) # Run args.func(lowercase ) if __name__ == "__main__": main()
30
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase__ = { """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""], """processing_git""": ["""GitProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GitForCausalLM""", """GitModel""", """GitPreTrainedModel""", """GitVisionModel""", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
30
1
"""simple docstring""" import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) def _a ( _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) snake_case_ = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , UpperCamelCase__ ) if matches: snake_case_ = float(matches[1] ) snake_case_ = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". snake_case_ = 1_001 snake_case_ = """imagenet-1k-id2label.json""" snake_case_ = """huggingface/label-files""" snake_case_ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) snake_case_ = {int(UpperCamelCase__ ) + 1: v for k, v in idalabel.items()} snake_case_ = """background""" snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} return config def _a ( ) -> Tuple: snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[Any]: snake_case_ = get_mobilenet_va_config(UpperCamelCase__ ) # Load 🤗 model snake_case_ = MobileNetVaForImageClassification(UpperCamelCase__ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor snake_case_ = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , ) snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case_ = model(**UpperCamelCase__ ) snake_case_ = outputs.logits assert logits.shape == (1, 1_001) if model_name == "mobilenet_v1_1.0_224": snake_case_ = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": snake_case_ = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: snake_case_ = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(UpperCamelCase__ ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: print("""Pushing to the hub...""" ) snake_case_ = """google/""" + model_name image_processor.push_to_hub(UpperCamelCase__ ) model.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='mobilenet_v1_1.0_224', type=str, help='Name of the MobileNetV1 model you\'d like to convert. 
Should in the form \'mobilenet_v1_<depth>_<size>\'.', ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __SCREAMING_SNAKE_CASE : Any = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
347
"""simple docstring""" from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig __lowerCamelCase = logging.get_logger(__name__) # General docstring __lowerCamelCase = "ResNetConfig" # Base docstring __lowerCamelCase = "microsoft/resnet-50" __lowerCamelCase = [1, 20_48, 7, 7] # Image classification docstring __lowerCamelCase = "microsoft/resnet-50" __lowerCamelCase = "tiger cat" __lowerCamelCase = [ "microsoft/resnet-50", # See all resnet models at https://huggingface.co/models?filter=resnet ] class UpperCamelCase__( nn.Module ): def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = 3 ,__UpperCAmelCase = 1 ,__UpperCAmelCase = "relu" ) -> Any: super().__init__() A__ = nn.Convad( __UpperCAmelCase ,__UpperCAmelCase ,kernel_size=__UpperCAmelCase ,stride=__UpperCAmelCase ,padding=kernel_size // 2 ,bias=__UpperCAmelCase ) A__ = nn.BatchNormad(__UpperCAmelCase ) A__ = ACTaFN[activation] if activation is not None else nn.Identity() def snake_case__ ( self ,__UpperCAmelCase ) -> Tensor: A__ = self.convolution(__UpperCAmelCase ) A__ = self.normalization(__UpperCAmelCase ) A__ = self.activation(__UpperCAmelCase ) return hidden_state class UpperCamelCase__( nn.Module ): def __init__( self ,__UpperCAmelCase ) -> Any: super().__init__() A__ = ResNetConvLayer( config.num_channels ,config.embedding_size ,kernel_size=7 ,stride=2 ,activation=config.hidden_act ) A__ = nn.MaxPoolad(kernel_size=3 ,stride=2 ,padding=1 ) A__ = config.num_channels def snake_case__ ( self ,__UpperCAmelCase ) -> Tensor: A__ = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' 
) A__ = self.embedder(__UpperCAmelCase ) A__ = self.pooler(__UpperCAmelCase ) return embedding class UpperCamelCase__( nn.Module ): def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = 2 ) -> Optional[Any]: super().__init__() A__ = nn.Convad(__UpperCAmelCase ,__UpperCAmelCase ,kernel_size=1 ,stride=__UpperCAmelCase ,bias=__UpperCAmelCase ) A__ = nn.BatchNormad(__UpperCAmelCase ) def snake_case__ ( self ,__UpperCAmelCase ) -> Tensor: A__ = self.convolution(__UpperCAmelCase ) A__ = self.normalization(__UpperCAmelCase ) return hidden_state class UpperCamelCase__( nn.Module ): def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = 1 ,__UpperCAmelCase = "relu" ) -> int: super().__init__() A__ = in_channels != out_channels or stride != 1 A__ = ( ResNetShortCut(__UpperCAmelCase ,__UpperCAmelCase ,stride=__UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) A__ = nn.Sequential( ResNetConvLayer(__UpperCAmelCase ,__UpperCAmelCase ,stride=__UpperCAmelCase ) ,ResNetConvLayer(__UpperCAmelCase ,__UpperCAmelCase ,activation=__UpperCAmelCase ) ,) A__ = ACTaFN[activation] def snake_case__ ( self ,__UpperCAmelCase ) -> Union[str, Any]: A__ = hidden_state A__ = self.layer(__UpperCAmelCase ) A__ = self.shortcut(__UpperCAmelCase ) hidden_state += residual A__ = self.activation(__UpperCAmelCase ) return hidden_state class UpperCamelCase__( nn.Module ): def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = 1 ,__UpperCAmelCase = "relu" ,__UpperCAmelCase = 4 ) -> int: super().__init__() A__ = in_channels != out_channels or stride != 1 A__ = out_channels // reduction A__ = ( ResNetShortCut(__UpperCAmelCase ,__UpperCAmelCase ,stride=__UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) A__ = nn.Sequential( ResNetConvLayer(__UpperCAmelCase ,__UpperCAmelCase ,kernel_size=1 ) ,ResNetConvLayer(__UpperCAmelCase ,__UpperCAmelCase ,stride=__UpperCAmelCase ) ,ResNetConvLayer(__UpperCAmelCase ,__UpperCAmelCase ,kernel_size=1 ,activation=__UpperCAmelCase ) ,) A__ = ACTaFN[activation] def snake_case__ ( self ,__UpperCAmelCase ) -> Optional[Any]: A__ = hidden_state A__ = self.layer(__UpperCAmelCase ) A__ = self.shortcut(__UpperCAmelCase ) hidden_state += residual A__ = self.activation(__UpperCAmelCase ) return hidden_state class UpperCamelCase__( nn.Module ): def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = 2 ,__UpperCAmelCase = 2 ,) -> Any: super().__init__() A__ = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer A__ = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(__UpperCAmelCase ,__UpperCAmelCase ,stride=__UpperCAmelCase ,activation=config.hidden_act ) ,*[layer(__UpperCAmelCase ,__UpperCAmelCase ,activation=config.hidden_act ) for _ in range(depth - 1 )] ,) def snake_case__ ( self ,__UpperCAmelCase ) -> Tensor: A__ = input for layer in self.layers: A__ = layer(__UpperCAmelCase ) return hidden_state class UpperCamelCase__( nn.Module ): def __init__( self ,__UpperCAmelCase ) -> Optional[Any]: super().__init__() A__ = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( __UpperCAmelCase ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) ) A__ = zip(config.hidden_sizes ,config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in 
zip(__UpperCAmelCase ,config.depths[1:] ): self.stages.append(ResNetStage(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,depth=__UpperCAmelCase ) ) def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ,__UpperCAmelCase = True ) -> BaseModelOutputWithNoAttention: A__ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: A__ = hidden_states + (hidden_state,) A__ = stage_module(__UpperCAmelCase ) if output_hidden_states: A__ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=__UpperCAmelCase ,hidden_states=__UpperCAmelCase ,) class UpperCamelCase__( __A ): lowerCAmelCase__ : str = ResNetConfig lowerCAmelCase__ : str = 'resnet' lowerCAmelCase__ : int = 'pixel_values' lowerCAmelCase__ : Any = True def snake_case__ ( self ,__UpperCAmelCase ) -> List[Any]: if isinstance(__UpperCAmelCase ,nn.Convad ): nn.init.kaiming_normal_(module.weight ,mode='fan_out' ,nonlinearity='relu' ) elif isinstance(__UpperCAmelCase ,(nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight ,1 ) nn.init.constant_(module.bias ,0 ) def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=False ) -> Any: if isinstance(__UpperCAmelCase ,__UpperCAmelCase ): A__ = value __lowerCamelCase = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" __lowerCamelCase = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( 'The bare ResNet model outputting raw features without any specific head on top.' 
, __A , ) class UpperCamelCase__( __A ): def __init__( self ,__UpperCAmelCase ) -> Union[str, Any]: super().__init__(__UpperCAmelCase ) A__ = config A__ = ResNetEmbeddings(__UpperCAmelCase ) A__ = ResNetEncoder(__UpperCAmelCase ) A__ = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=__UpperCAmelCase ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ) -> BaseModelOutputWithPoolingAndNoAttention: A__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A__ = return_dict if return_dict is not None else self.config.use_return_dict A__ = self.embedder(__UpperCAmelCase ) A__ = self.encoder( __UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ,return_dict=__UpperCAmelCase ) A__ = encoder_outputs[0] A__ = self.pooler(__UpperCAmelCase ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__UpperCAmelCase ,pooler_output=__UpperCAmelCase ,hidden_states=encoder_outputs.hidden_states ,) @add_start_docstrings( '\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , __A , ) class UpperCamelCase__( __A ): def __init__( self ,__UpperCAmelCase ) -> Tuple: super().__init__(__UpperCAmelCase ) A__ = config.num_labels A__ = ResNetModel(__UpperCAmelCase ) # classification head A__ = nn.Sequential( nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=__UpperCAmelCase ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def snake_case__ ( self ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,) -> ImageClassifierOutputWithNoAttention: A__ = return_dict if return_dict is not None else self.config.use_return_dict A__ = self.resnet(__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ,return_dict=__UpperCAmelCase ) A__ = outputs.pooler_output if return_dict else outputs[1] A__ = self.classifier(__UpperCAmelCase ) A__ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: A__ = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): A__ = 'single_label_classification' else: A__ = 'multi_label_classification' if self.config.problem_type == "regression": A__ = MSELoss() if self.num_labels == 1: A__ = loss_fct(logits.squeeze() ,labels.squeeze() ) else: A__ = loss_fct(__UpperCAmelCase ,__UpperCAmelCase ) elif self.config.problem_type == "single_label_classification": A__ = CrossEntropyLoss() A__ = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": A__ = BCEWithLogitsLoss() A__ = loss_fct(__UpperCAmelCase ,__UpperCAmelCase ) if not return_dict: A__ = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__UpperCAmelCase 
,logits=__UpperCAmelCase ,hidden_states=outputs.hidden_states ) @add_start_docstrings( '\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ' , __A , ) class UpperCamelCase__( __A , __A ): def __init__( self ,__UpperCAmelCase ) -> Optional[Any]: super().__init__(__UpperCAmelCase ) super()._init_backbone(__UpperCAmelCase ) A__ = [config.embedding_size] + config.hidden_sizes A__ = ResNetEmbeddings(__UpperCAmelCase ) A__ = ResNetEncoder(__UpperCAmelCase ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__UpperCAmelCase ) @replace_return_docstrings(output_type=__UpperCAmelCase ,config_class=_CONFIG_FOR_DOC ) def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ) -> BackboneOutput: A__ = return_dict if return_dict is not None else self.config.use_return_dict A__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A__ = self.embedder(__UpperCAmelCase ) A__ = self.encoder(__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ,return_dict=__UpperCAmelCase ) A__ = outputs.hidden_states A__ = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: A__ = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=__UpperCAmelCase ,hidden_states=outputs.hidden_states if output_hidden_states else None ,attentions=__UpperCAmelCase ,)
221
0
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial term by term at the point x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial at x using Horner's method (one pass, no powers)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
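# Added illustration (not in the original file): Horner's rule evaluates
# 5*x**2 + 9.3*x**3 + 7*x**4 as (((7*x + 9.3)*x + 5)*x + 0)*x + 0, so the
# two evaluators should agree up to floating-point rounding.
import math

sample_poly = (0.0, 0.0, 5.0, 9.3, 7.0)
assert math.isclose(evaluate_poly(sample_poly, 10.0), horner(sample_poly, 10.0))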
361
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES


mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
    "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
330
0
"""simple docstring""" import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def lowercase (snake_case__ : int ) -> float: '''simple docstring''' return np.dot(a_ , a_ ) class SCREAMING_SNAKE_CASE__ : def __init__( self : Any , *, lowerCAmelCase : Union[str, Any] = np.inf , lowerCAmelCase : Any = "linear" , lowerCAmelCase : Dict = 0.0 , ): lowerCAmelCase = regularization lowerCAmelCase = gamma if kernel == "linear": lowerCAmelCase = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError("""rbf kernel requires gamma""" ) if not isinstance(self.gamma , (float, int) ): raise ValueError("""gamma must be float or int""" ) if not self.gamma > 0: raise ValueError("""gamma must be > 0""" ) lowerCAmelCase = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: lowerCAmelCase = f'''Unknown kernel: {kernel}''' raise ValueError(lowerCamelCase__ ) def __lowercase ( self : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict ): return np.dot(lowerCamelCase__ , lowerCamelCase__ ) def __lowercase ( self : int , lowerCAmelCase : int , lowerCAmelCase : str ): return np.exp(-(self.gamma * norm_squared(vectora - vectora )) ) def __lowercase ( self : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Any ): lowerCAmelCase = observations lowerCAmelCase = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations (lowerCAmelCase ) = np.shape(lowerCamelCase__ ) def to_minimize(lowerCAmelCase : Optional[int] ) -> float: lowerCAmelCase = 0 (lowerCAmelCase ) = np.shape(lowerCamelCase__ ) for i in range(lowerCamelCase__ ): for j in range(lowerCamelCase__ ): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j] ) ) return 1 / 2 * s - sum(lowerCamelCase__ ) lowerCAmelCase = LinearConstraint(lowerCamelCase__ , 0 , 0 ) lowerCAmelCase = Bounds(0 , self.regularization ) lowerCAmelCase = minimize( lowerCamelCase__ , np.ones(lowerCamelCase__ ) , bounds=lowerCamelCase__ , constraints=[ly_contraint] ).x lowerCAmelCase = l_star # calculating mean offset of separation plane to points lowerCAmelCase = 0 for i in range(lowerCamelCase__ ): for j in range(lowerCamelCase__ ): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j] ) lowerCAmelCase = s / n def __lowercase ( self : List[str] , lowerCAmelCase : List[Any] ): lowerCAmelCase = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , lowerCamelCase__ ) for n in range(len(self.classes ) ) ) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
155
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
71
0
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
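# Added usage illustration (not in the original file): the function returns
# the 1-based index of the most significant set bit, i.e. int.bit_length().
assert get_highest_set_bit_position(1) == 1  # 0b1
assert get_highest_set_bit_position(8) == 4  # 0b1000
assert get_highest_set_bit_position(8) == (8).bit_length()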
358
import json
import os
from typing import Dict, List, Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """Return the set of symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
301
0
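The record above is a byte-pair-encoding tokenizer, and its merge loop is easier to follow in isolation. The sketch below reimplements the same greedy merge on its own, with a toy merge-rank table; the ranks and the sample word are illustrative, not the real blenderbot_small-90M merges.

def get_pairs(word):
    # All adjacent symbol pairs in `word`, a tuple of symbols.
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}


def bpe(token, bpe_ranks):
    # Append the end-of-word marker, then repeatedly apply the best-ranked
    # (i.e. earliest-learned) merge until no learned merge applies.
    word = tuple(token[:-1]) + (token[-1] + "</w>",)
    pairs = get_pairs(word)
    while pairs:
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word = []
        i = 0
        while i < len(word):
            # Merge every left-to-right occurrence of (first, second).
            if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        if len(word) == 1:
            break
        pairs = get_pairs(word)
    return " ".join(word)


toy_ranks = {("h", "e"): 0, ("l", "l"): 1, ("he", "ll"): 2}  # illustrative merge table
print(bpe("hello", toy_ranks))  # -> hell o</w>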
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class __magic_name__ : def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=False , __snake_case=True , __snake_case=99 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=512 , __snake_case=16 , __snake_case=2 , __snake_case=0.02 , __snake_case=3 , __snake_case=4 , __snake_case=None , ) -> List[str]: '''simple docstring''' __a =parent __a =batch_size __a =seq_length __a =is_training __a =use_input_mask __a =use_token_type_ids __a =use_labels __a =vocab_size __a =hidden_size __a =num_hidden_layers __a =num_attention_heads __a =intermediate_size __a =hidden_act __a =hidden_dropout_prob __a =attention_probs_dropout_prob __a =max_position_embeddings __a =type_vocab_size __a =type_sequence_label_size __a =initializer_range __a =num_labels __a =num_choices __a =scope def __magic_name__ ( self ) -> List[str]: '''simple docstring''' __a =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a =None if self.use_input_mask: __a =random_attention_mask([self.batch_size, self.seq_length] ) __a =None if self.use_token_type_ids: __a =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a =None __a =None __a =None if self.use_labels: __a =ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a =ids_tensor([self.batch_size] , self.num_choices ) __a =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __magic_name__ ( self ) -> List[Any]: '''simple docstring''' return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , use_stable_embedding=__snake_case , ) def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> Union[str, Any]: '''simple docstring''' __a =OpenLlamaModel(config=__snake_case ) model.to(__snake_case ) model.eval() __a =model(__snake_case , attention_mask=__snake_case ) __a =model(__snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> int: '''simple docstring''' __a =True __a =OpenLlamaModel(__snake_case ) 
model.to(__snake_case ) model.eval() __a =model( __snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , ) __a =model( __snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , ) __a =model(__snake_case , attention_mask=__snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Tuple: '''simple docstring''' __a =OpenLlamaForCausalLM(config=__snake_case ) model.to(__snake_case ) model.eval() __a =model(__snake_case , attention_mask=__snake_case , labels=__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> int: '''simple docstring''' __a =True __a =True __a =OpenLlamaForCausalLM(config=__snake_case ) model.to(__snake_case ) model.eval() # first forward pass __a =model( __snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , use_cache=__snake_case , ) __a =outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __a =ids_tensor((self.batch_size, 3) , config.vocab_size ) __a =ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __a =torch.cat([input_ids, next_tokens] , dim=-1 ) __a =torch.cat([input_mask, next_mask] , dim=-1 ) __a =model( __snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , output_hidden_states=__snake_case , )['hidden_states'][0] __a =model( __snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , past_key_values=__snake_case , output_hidden_states=__snake_case , )['hidden_states'][0] # select random slice __a =ids_tensor((1,) , output_from_past.shape[-1] ).item() __a =output_from_no_past[:, -3:, random_slice_idx].detach() __a =output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-3 ) ) def __magic_name__ ( self ) -> str: '''simple docstring''' __a =self.prepare_config_and_inputs() ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) =config_and_inputs __a ={'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) SCREAMING_SNAKE_CASE = (OpenLlamaForCausalLM,) if is_torch_available() else () SCREAMING_SNAKE_CASE = ( { 'feature-extraction': OpenLlamaModel, 'text-classification': OpenLlamaForSequenceClassification, 'text-generation': OpenLlamaForCausalLM, 'zero-shot': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def __magic_name__ ( self ) -> Optional[Any]: '''simple docstring''' __a =OpenLlamaModelTester(self ) __a 
=ConfigTester(self , config_class=__snake_case , hidden_size=37 ) def __magic_name__ ( self ) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() def __magic_name__ ( self ) -> str: '''simple docstring''' __a =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) def __magic_name__ ( self ) -> Dict: '''simple docstring''' __a =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __a =type self.model_tester.create_and_check_model(*__snake_case ) def __magic_name__ ( self ) -> Optional[int]: '''simple docstring''' __a , __a =self.model_tester.prepare_config_and_inputs_for_common() __a =3 __a =input_dict['input_ids'] __a =input_ids.ne(1 ).to(__snake_case ) __a =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __a =OpenLlamaForSequenceClassification(__snake_case ) model.to(__snake_case ) model.eval() __a =model(__snake_case , attention_mask=__snake_case , labels=__snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __magic_name__ ( self ) -> Any: '''simple docstring''' __a , __a =self.model_tester.prepare_config_and_inputs_for_common() __a =3 __a ='single_label_classification' __a =input_dict['input_ids'] __a =input_ids.ne(1 ).to(__snake_case ) __a =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __a =OpenLlamaForSequenceClassification(__snake_case ) model.to(__snake_case ) model.eval() __a =model(__snake_case , attention_mask=__snake_case , labels=__snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __magic_name__ ( self ) -> Optional[int]: '''simple docstring''' __a , __a =self.model_tester.prepare_config_and_inputs_for_common() __a =3 __a ='multi_label_classification' __a =input_dict['input_ids'] __a =input_ids.ne(1 ).to(__snake_case ) __a =ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __a =OpenLlamaForSequenceClassification(__snake_case ) model.to(__snake_case ) model.eval() __a =model(__snake_case , attention_mask=__snake_case , labels=__snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' ) def __magic_name__ ( self ) -> Optional[Any]: '''simple docstring''' pass @parameterized.expand([('linear',), ('dynamic',)] ) def __magic_name__ ( self , __snake_case ) -> Tuple: '''simple docstring''' __a , __a =self.model_tester.prepare_config_and_inputs_for_common() __a =ids_tensor([1, 10] , config.vocab_size ) __a =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __a =OpenLlamaModel(__snake_case ) original_model.to(__snake_case ) original_model.eval() __a =original_model(__snake_case ).last_hidden_state __a =original_model(__snake_case ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __a ={'type': scaling_type, 'factor': 10.0} __a =OpenLlamaModel(__snake_case ) scaled_model.to(__snake_case ) scaled_model.eval() __a =scaled_model(__snake_case ).last_hidden_state __a =scaled_model(__snake_case ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings 
until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(__snake_case , __snake_case , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__snake_case , __snake_case , atol=1e-5 ) )
218
import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput _lowerCAmelCase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCamelCase_( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ): """simple docstring""" warnings.warn( 'The preprocess method is deprecated and will be removed in a future version. Please' ' use VaeImageProcessor.preprocess instead' , _snake_case , ) if isinstance(_snake_case , torch.Tensor ): return image elif isinstance(_snake_case , PIL.Image.Image ): __a =[image] if isinstance(image[0] , PIL.Image.Image ): __a , __a =image[0].size __a , __a =(x - x % 8 for x in (w, h)) # resize to integer multiple of 8 __a =[np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image] __a =np.concatenate(_snake_case , axis=0 ) __a =np.array(_snake_case ).astype(np.floataa ) / 255.0 __a =image.transpose(0 , 3 , 1 , 2 ) __a =2.0 * image - 1.0 __a =torch.from_numpy(_snake_case ) elif isinstance(image[0] , torch.Tensor ): __a =torch.cat(_snake_case , dim=0 ) return image def UpperCamelCase_( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ): """simple docstring""" if isinstance(_snake_case , torch.Tensor ): return mask elif isinstance(_snake_case , PIL.Image.Image ): __a =[mask] if isinstance(mask[0] , PIL.Image.Image ): __a , __a =mask[0].size __a , __a =(x - x % 32 for x in (w, h)) # resize to integer multiple of 32 __a =[np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask] __a =np.concatenate(_snake_case , axis=0 ) __a =mask.astype(np.floataa ) / 255.0 __a =0 __a =1 __a =torch.from_numpy(_snake_case ) elif isinstance(mask[0] , torch.Tensor ): __a =torch.cat(_snake_case , dim=0 ) return mask class __magic_name__ ( lowerCAmelCase_ ): SCREAMING_SNAKE_CASE = 42 SCREAMING_SNAKE_CASE = 42 def __init__( self , __snake_case , __snake_case ) -> Union[str, Any]: '''simple docstring''' super().__init__() self.register_modules(unet=__snake_case , scheduler=__snake_case ) @torch.no_grad() def __call__( self , __snake_case , __snake_case , __snake_case = 250 , __snake_case = 0.0 , __snake_case = 10 , __snake_case = 10 , __snake_case = None , __snake_case = "pil" , __snake_case = True , ) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' __a =image __a =_preprocess_image(__snake_case ) __a =original_image.to(device=self.device , dtype=self.unet.dtype ) __a =_preprocess_mask(__snake_case ) __a =mask_image.to(device=self.device , dtype=self.unet.dtype ) __a =original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size: raise ValueError( f'You have passed a list of generators of length {len(__snake_case )}, but requested an effective batch' f' size of {batch_size}. Make sure the batch size matches the length of the generators.' 
) __a =original_image.shape __a =randn_tensor(__snake_case , generator=__snake_case , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(__snake_case , __snake_case , __snake_case , self.device ) __a =eta __a =self.scheduler.timesteps[0] + 1 __a =generator[0] if isinstance(__snake_case , __snake_case ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual __a =self.unet(__snake_case , __snake_case ).sample # compute previous image: x_t -> x_t-1 __a =self.scheduler.step(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ).prev_sample else: # compute the reverse: x_t-1 -> x_t __a =self.scheduler.undo_step(__snake_case , __snake_case , __snake_case ) __a =t __a =(image / 2 + 0.5).clamp(0 , 1 ) __a =image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __a =self.numpy_to_pil(__snake_case ) if not return_dict: return (image,) return ImagePipelineOutput(images=__snake_case )
218
1
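The Open-Llama test earlier in this record checks that decoding with a key/value cache reproduces the outputs of a full forward pass over the concatenated sequence. A compact sketch of the same kind of consistency check follows; it uses a tiny randomly initialized GPT-2 config purely so nothing is downloaded, the model choice is illustrative and not the model under test.

# Sketch: logits for the last tokens must match whether the prefix is
# recomputed or replayed from `past_key_values`.
import torch
from transformers import GPT2Config, GPT2LMHeadModel

config = GPT2Config(n_layer=2, n_head=2, n_embd=32, vocab_size=100)
model = GPT2LMHeadModel(config).eval()

input_ids = torch.randint(0, 100, (1, 8))
next_tokens = torch.randint(0, 100, (1, 3))

with torch.no_grad():
    # Full pass over the whole sequence, no cache.
    full_logits = model(torch.cat([input_ids, next_tokens], dim=-1)).logits
    # Two-step pass: prefix first, then only the new tokens plus the cache.
    past = model(input_ids, use_cache=True).past_key_values
    cached_logits = model(next_tokens, past_key_values=past).logits

assert torch.allclose(full_logits[:, -3:], cached_logits, atol=1e-4)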
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def __magic_name__ ( __a : int , __a : Optional[int]=False ): '''simple docstring''' UpperCamelCase__ = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("""head""" ): UpperCamelCase__ = """segformer.encoder.""" + key if key.startswith("""backbone""" ): UpperCamelCase__ = key.replace("""backbone""" , """segformer.encoder""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 UpperCamelCase__ = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] UpperCamelCase__ = key.replace(f"patch_embed{idx}" , f"patch_embeddings.{int(__a )-1}" ) if "norm" in key: UpperCamelCase__ = key.replace("""norm""" , """layer_norm""" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 UpperCamelCase__ = key[key.find("""segformer.encoder.layer_norm""" ) + len("""segformer.encoder.layer_norm""" )] UpperCamelCase__ = key.replace(f"layer_norm{idx}" , f"layer_norm.{int(__a )-1}" ) if "layer_norm1" in key: UpperCamelCase__ = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: UpperCamelCase__ = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 UpperCamelCase__ = key[key.find("""block""" ) + len("""block""" )] UpperCamelCase__ = key.replace(f"block{idx}" , f"block.{int(__a )-1}" ) if "attn.q" in key: UpperCamelCase__ = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: UpperCamelCase__ = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: UpperCamelCase__ = key.replace("""attn""" , """attention.self""" ) if "fc1" in key: UpperCamelCase__ = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: UpperCamelCase__ = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: UpperCamelCase__ = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: UpperCamelCase__ = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) UpperCamelCase__ = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 UpperCamelCase__ = key[key.find("""linear_c""" ) + len("""linear_c""" )] UpperCamelCase__ = key.replace(f"linear_c{idx}" , f"linear_c.{int(__a )-1}" ) if key.startswith("""head""" ): UpperCamelCase__ = key.replace("""head""" , """classifier""" ) UpperCamelCase__ = value return new_state_dict def __magic_name__ ( __a : List[Any] , __a : Optional[int] ): '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) UpperCamelCase__ = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight" ) UpperCamelCase__ = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias" ) # next, add keys and values (in that order) to the state dict UpperCamelCase__ = kv_weight[ : config.hidden_sizes[i], : ] UpperCamelCase__ = kv_bias[: config.hidden_sizes[i]] UpperCamelCase__ = kv_weight[ 
config.hidden_sizes[i] :, : ] UpperCamelCase__ = kv_bias[ config.hidden_sizes[i] : ] def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCamelCase__ = Image.open(requests.get(__a , stream=__a ).raw ) return image @torch.no_grad() def __magic_name__ ( __a : Union[str, Any] , __a : Optional[int] , __a : Any ): '''simple docstring''' UpperCamelCase__ = SegformerConfig() UpperCamelCase__ = False # set attributes based on model_name UpperCamelCase__ = """huggingface/label-files""" if "segformer" in model_name: UpperCamelCase__ = model_name[len("""segformer.""" ) : len("""segformer.""" ) + 2] if "ade" in model_name: UpperCamelCase__ = 150 UpperCamelCase__ = """ade20k-id2label.json""" UpperCamelCase__ = (1, 150, 128, 128) elif "city" in model_name: UpperCamelCase__ = 19 UpperCamelCase__ = """cityscapes-id2label.json""" UpperCamelCase__ = (1, 19, 128, 128) else: raise ValueError(f"Model {model_name} not supported" ) elif "mit" in model_name: UpperCamelCase__ = True UpperCamelCase__ = model_name[4:6] UpperCamelCase__ = 1_000 UpperCamelCase__ = """imagenet-1k-id2label.json""" UpperCamelCase__ = (1, 1_000) else: raise ValueError(f"Model {model_name} not supported" ) # set config attributes UpperCamelCase__ = json.load(open(hf_hub_download(__a , __a , repo_type="""dataset""" ) , """r""" ) ) UpperCamelCase__ = {int(__a ): v for k, v in idalabel.items()} UpperCamelCase__ = idalabel UpperCamelCase__ = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": UpperCamelCase__ = [64, 128, 320, 512] UpperCamelCase__ = 256 elif size == "b2": UpperCamelCase__ = [64, 128, 320, 512] UpperCamelCase__ = 768 UpperCamelCase__ = [3, 4, 6, 3] elif size == "b3": UpperCamelCase__ = [64, 128, 320, 512] UpperCamelCase__ = 768 UpperCamelCase__ = [3, 4, 18, 3] elif size == "b4": UpperCamelCase__ = [64, 128, 320, 512] UpperCamelCase__ = 768 UpperCamelCase__ = [3, 8, 27, 3] elif size == "b5": UpperCamelCase__ = [64, 128, 320, 512] UpperCamelCase__ = 768 UpperCamelCase__ = [3, 6, 40, 3] else: raise ValueError(f"Size {size} not supported" ) # load image processor (only resize + normalize) UpperCamelCase__ = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=__a , align=__a , do_random_crop=__a ) # prepare image UpperCamelCase__ = prepare_img() UpperCamelCase__ = image_processor(images=__a , return_tensors="""pt""" ).pixel_values logger.info(f"Converting model {model_name}..." 
) # load original state dict if encoder_only: UpperCamelCase__ = torch.load(__a , map_location=torch.device("""cpu""" ) ) else: UpperCamelCase__ = torch.load(__a , map_location=torch.device("""cpu""" ) )["""state_dict"""] # rename keys UpperCamelCase__ = rename_keys(__a , encoder_only=__a ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(__a , __a ) # create HuggingFace model and load state dict if encoder_only: UpperCamelCase__ = False UpperCamelCase__ = SegformerForImageClassification(__a ) else: UpperCamelCase__ = SegformerForSemanticSegmentation(__a ) model.load_state_dict(__a ) model.eval() # forward pass UpperCamelCase__ = model(__a ) UpperCamelCase__ = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": UpperCamelCase__ = torch.tensor( [ [[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]], [[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]], [[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": UpperCamelCase__ = torch.tensor( [ [[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]], [[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]], [[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": UpperCamelCase__ = torch.tensor( [ [[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]], [[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]], [[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": UpperCamelCase__ = torch.tensor( [ [[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]], [[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]], [[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": UpperCamelCase__ = torch.tensor( [ [[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]], [[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]], [[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": UpperCamelCase__ = torch.tensor( [ [[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]], [[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]], [[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": UpperCamelCase__ = torch.tensor( [ [[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]], [[-11.4_906, 
-12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]], [[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": UpperCamelCase__ = torch.tensor( [ [[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]], [[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]], [[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": UpperCamelCase__ = torch.tensor( [ [ [-1.1_3_7_2E0_1, -1.2_7_8_7E0_1, -1.3_4_7_7E0_1], [-1.2_5_3_6E0_1, -1.4_1_9_4E0_1, -1.4_4_0_9E0_1], [-1.3_2_1_7E0_1, -1.4_8_8_8E0_1, -1.5_3_2_7E0_1], ], [ [-1.4_7_9_1E0_1, -1.7_1_2_2E0_1, -1.8_2_7_7E0_1], [-1.7_1_6_3E0_1, -1.9_1_9_2E0_1, -1.9_5_3_3E0_1], [-1.7_8_9_7E0_1, -1.9_9_9_1E0_1, -2.0_3_1_5E0_1], ], [ [7.6_7_2_3E-0_1, 4.1_9_2_1E-0_1, -7.7_8_7_8E-0_2], [4.7_7_7_2E-0_1, 9.5_5_5_7E-0_3, -2.8_0_8_2E-0_1], [3.6_0_3_2E-0_1, -2.4_8_2_6E-0_1, -5.1_1_6_8E-0_1], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": UpperCamelCase__ = torch.tensor( [ [[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]], [[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]], [[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": UpperCamelCase__ = torch.tensor( [ [[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]], [[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]], [[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": UpperCamelCase__ = torch.tensor( [ [[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]], [[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]], [[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": UpperCamelCase__ = torch.tensor( [ [[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]], [[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]], [[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": UpperCamelCase__ = torch.tensor( [ [[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]], [[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]], [[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": UpperCamelCase__ = torch.tensor( [ [[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]], [[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]], [[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, 
-2.8_126, -2.9_316]], ] ) else: UpperCamelCase__ = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , __a , atol=1E-2 ) # finally, save model and image processor logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(__a ).mkdir(exist_ok=__a ) model.save_pretrained(__a ) image_processor.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''segformer.b0.512x512.ade.160k''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCamelCase_ = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
178
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Cleans the model documentation table of content by removing duplicates and sorting models alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]

    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
178
1
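The SegFormer conversion script in this record (`read_in_k_v`) splits a fused key/value projection from the original checkpoint into the separate key and value weights the converted model expects. A shape-only sketch of that slicing, with illustrative sizes:

# The original checkpoint stores K and V stacked as one (2*hidden, hidden)
# matrix; cutting along the output dimension recovers the two projections.
import torch

hidden_size = 64
kv_weight = torch.randn(2 * hidden_size, hidden_size)
kv_bias = torch.randn(2 * hidden_size)

key_weight = kv_weight[:hidden_size, :]
value_weight = kv_weight[hidden_size:, :]
key_bias = kv_bias[:hidden_size]
value_bias = kv_bias[hidden_size:]

assert key_weight.shape == value_weight.shape == (hidden_size, hidden_size)
assert key_bias.shape == value_bias.shape == (hidden_size,)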
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __snake_case =logging.getLogger(__name__) def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Dict ): return (preds == labels).mean() @dataclass class UpperCAmelCase_ : lowerCamelCase : str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) lowerCamelCase : Optional[str] = field( default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCamelCase : Optional[str] = field( default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) lowerCamelCase : Optional[str] = field( default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class UpperCAmelCase_ : lowerCamelCase : str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} ) lowerCamelCase : str = field(metadata={'''help''': '''Should contain the data files for the task.'''} ) lowerCamelCase : int = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) lowerCamelCase : bool = field( default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def a_ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , lowerCamelCase ) # Set seed set_seed(training_args.seed ) try: lowerCAmelCase = processors[data_args.task_name]() lowerCAmelCase = processor.get_labels() lowerCAmelCase = len(lowerCamelCase ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) lowerCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowerCAmelCase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , ) # Get datasets lowerCAmelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) lowerCAmelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(lowerCamelCase : EvalPrediction ) -> Dict: lowerCAmelCase = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(lowerCamelCase , p.label_ids )} # Data collator lowerCAmelCase = DataCollatorWithPadding(lowerCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer lowerCAmelCase = Trainer( model=lowerCamelCase , args=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , compute_metrics=lowerCamelCase , data_collator=lowerCamelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation lowerCAmelCase = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) lowerCAmelCase = trainer.evaluate() lowerCAmelCase = os.path.join(training_args.output_dir , 'eval_results.txt' ) if 
trainer.is_world_master(): with open(lowerCamelCase , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , lowerCamelCase , lowerCamelCase ) writer.write('%s = %s\n' % (key, value) ) results.update(lowerCamelCase ) return results def a_ ( lowerCamelCase : Dict ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
4
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Union[str, Any] =ViTImageProcessor if is_vision_available() else None @property def UpperCAmelCase ( self ) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase ( self ) -> int: UpperCamelCase :Union[str, Any] = (3, 32, 128) UpperCamelCase :Any = tempfile.mkdtemp() # fmt: off UpperCamelCase :int = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on UpperCamelCase :Optional[int] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) UpperCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) UpperCamelCase :Tuple = { '''do_normalize''': False, '''do_resize''': True, '''image_processor_type''': '''ViTImageProcessor''', '''resample''': 3, '''size''': {'''height''': 32, '''width''': 128}, } UpperCamelCase :str = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> int: return MgpstrTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> str: shutil.rmtree(self.tmpdirname ) def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :Dict = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta ) UpperCamelCase :List[Any] = Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) return image_input def UpperCAmelCase ( self ) -> str: UpperCamelCase :str = self.get_tokenizer() UpperCamelCase :Union[str, Any] = self.get_image_processor() UpperCamelCase :List[Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) processor.save_pretrained(self.tmpdirname ) UpperCamelCase :Dict = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , SCREAMING_SNAKE_CASE_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> int: UpperCamelCase :Optional[int] = self.get_tokenizer() UpperCamelCase :Dict = 
self.get_image_processor() UpperCamelCase :List[Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) processor.save_pretrained(self.tmpdirname ) UpperCamelCase :Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) UpperCamelCase :Optional[Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 ) UpperCamelCase :int = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , SCREAMING_SNAKE_CASE_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase :Tuple = self.get_image_processor() UpperCamelCase :List[str] = self.get_tokenizer() UpperCamelCase :str = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = self.prepare_image_inputs() UpperCamelCase :List[str] = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''np''' ) UpperCamelCase :Optional[Any] = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCAmelCase ( self ) -> Any: UpperCamelCase :Optional[Any] = self.get_image_processor() UpperCamelCase :Union[str, Any] = self.get_tokenizer() UpperCamelCase :int = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = '''test''' UpperCamelCase :Optional[int] = processor(text=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[Any] = tokenizer(SCREAMING_SNAKE_CASE_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase :List[str] = self.get_image_processor() UpperCamelCase :Tuple = self.get_tokenizer() UpperCamelCase :Union[str, Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = '''test''' UpperCamelCase :str = self.prepare_image_inputs() UpperCamelCase :Dict = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE_ ): processor() def UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase :Optional[Any] = self.get_image_processor() UpperCamelCase :Any = self.get_tokenizer() UpperCamelCase :Union[str, Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] UpperCamelCase :Union[str, Any] = processor.char_decode(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Dict = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase :List[Any] = self.get_image_processor() UpperCamelCase :Optional[Any] = self.get_tokenizer() UpperCamelCase 
:Any = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[Any] = None UpperCamelCase :List[Any] = self.prepare_image_inputs() UpperCamelCase :Union[str, Any] = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :str = self.get_image_processor() UpperCamelCase :Tuple = self.get_tokenizer() UpperCamelCase :Optional[int] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = torch.randn(1 , 27 , 38 ) UpperCamelCase :Union[str, Any] = torch.randn(1 , 27 , 5_0257 ) UpperCamelCase :Optional[Any] = torch.randn(1 , 27 , 3_0522 ) UpperCamelCase :Optional[Any] = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
259
0
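The multiple-choice training script in this record wires a `compute_metrics` callback into `Trainer`. Its contract is small: an `EvalPrediction` carrying raw predictions and label ids goes in, a dict of scalar metrics comes out. A minimal sketch (the dummy inputs are illustrative):

import numpy as np
from transformers import EvalPrediction


def compute_metrics(p: EvalPrediction) -> dict:
    # Argmax over the class dimension turns raw logits into hard predictions.
    preds = np.argmax(p.predictions, axis=1)
    return {"acc": float((preds == p.label_ids).mean())}


dummy = EvalPrediction(predictions=np.array([[0.1, 0.9], [0.8, 0.2]]), label_ids=np.array([1, 0]))
print(compute_metrics(dummy))  # {'acc': 1.0}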
"""simple docstring""" from __future__ import annotations def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): _lowercase : Optional[Any] = 0 _lowercase : Optional[int] = len(__UpperCAmelCase ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: _lowercase : Dict = i + 1 else: _lowercase : Optional[int] = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(F'{two_pointer([2, 7, 11, 15], 9) = }')
361
"""simple docstring""" import argparse from collections import defaultdict def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__UpperCAmelCase , """r""" ) as f: _lowercase : Any = f.readlines() _lowercase : Optional[int] = F"""class {class_name}(""" _lowercase : List[str] = F"""{4 * " "}def {test_name}(""" _lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}""" _lowercase : int = F"""{16 * " "}{correct_line.split()[0]}""" _lowercase : str = False _lowercase : Optional[Any] = False _lowercase : Union[str, Any] = False _lowercase : Any = False _lowercase : int = 0 _lowercase : Tuple = 0 _lowercase : Union[str, Any] = [] for line in lines: if line.startswith(__UpperCAmelCase ): _lowercase : List[str] = True elif in_class and line.startswith(__UpperCAmelCase ): _lowercase : str = True elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )): _lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _lowercase : Optional[int] = True if in_class and in_func and in_line: if ")" not in line: continue else: _lowercase : Optional[Any] = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _lowercase : Union[str, Any] = False else: new_lines.append(__UpperCAmelCase ) with open(__UpperCAmelCase , """w""" ) as f: for line in new_lines: f.write(__UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ): if fail is not None: with open(__UpperCAmelCase , """r""" ) as f: _lowercase : Dict = {l.strip() for l in f.readlines()} else: _lowercase : int = None with open(__UpperCAmelCase , """r""" ) as f: _lowercase : int = f.readlines() _lowercase : int = defaultdict(__UpperCAmelCase ) for line in correct_lines: _lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase: List[Any] = argparse.ArgumentParser() parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""") parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None) UpperCAmelCase: Any = parser.parse_args() main(args.correct_filename, args.fail_filename)
336
0
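The two-pointer solution in this record relies on `nums` being sorted. For unsorted input, the standard alternative is a one-pass hash map of complements; the variant below is an illustration for contrast, not part of the original file (Python 3.9+ for the builtin generic annotations):

def two_sum_unsorted(nums: list[int], target: int) -> list[int]:
    # One pass with a complement map: O(n) time, O(n) extra space,
    # and no requirement that `nums` be sorted.
    seen: dict[int, int] = {}
    for j, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], j]
        seen[value] = j
    return []


print(two_sum_unsorted([11, 2, 15, 7], 9))  # [1, 3]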
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging


logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
306
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def __snake_case ( ): lowerCamelCase_ = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=UpperCAmelCase_ ) lowerCamelCase_ = parser.add_subparsers(help="accelerate command helpers" ) # Register commands get_config_parser(subparsers=UpperCAmelCase_ ) env_command_parser(subparsers=UpperCAmelCase_ ) launch_command_parser(subparsers=UpperCAmelCase_ ) tpu_command_parser(subparsers=UpperCAmelCase_ ) test_command_parser(subparsers=UpperCAmelCase_ ) # Let's go lowerCamelCase_ = parser.parse_args() if not hasattr(UpperCAmelCase_ , "func" ): parser.print_help() exit(1 ) # Run args.func(UpperCAmelCase_ ) if __name__ == "__main__": main()
'''simple docstring''' import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() A__ : List[str] =logging.get_logger(__name__) def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = SwinConfig.from_pretrained( """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) _lowerCAmelCase = MaskFormerConfig(backbone_config=lowerCAmelCase ) _lowerCAmelCase = """huggingface/label-files""" if "ade20k-full" in model_name: # this should be ok _lowerCAmelCase = 8_47 _lowerCAmelCase = """maskformer-ade20k-full-id2label.json""" elif "ade" in model_name: # this should be ok _lowerCAmelCase = 1_50 _lowerCAmelCase = """ade20k-id2label.json""" elif "coco-stuff" in model_name: # this should be ok _lowerCAmelCase = 1_71 _lowerCAmelCase = """maskformer-coco-stuff-id2label.json""" elif "coco" in model_name: # TODO _lowerCAmelCase = 1_33 _lowerCAmelCase = """coco-panoptic-id2label.json""" elif "cityscapes" in model_name: # this should be ok _lowerCAmelCase = 19 _lowerCAmelCase = """cityscapes-id2label.json""" elif "vistas" in model_name: # this should be ok _lowerCAmelCase = 65 _lowerCAmelCase = """mapillary-vistas-id2label.json""" _lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) _lowerCAmelCase = {int(lowerCAmelCase ): v for k, v in idalabel.items()} return config def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [] # stem # fmt: off rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.bias", 
f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") ) if i < 3: rename_keys.append((f"backbone.layers.{i}.downsample.reduction.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") ) rename_keys.append((f"backbone.layers.{i}.downsample.norm.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") ) rename_keys.append((f"backbone.layers.{i}.downsample.norm.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") ) rename_keys.append((f"backbone.norm{i}.weight", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") ) rename_keys.append((f"backbone.norm{i}.bias", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") ) # FPN rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((f"sem_seg_head.adapter_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") ) rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") ) rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") ) rename_keys.append((f"sem_seg_head.layer_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") ) rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") ) rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") ) rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") ) rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection 
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") ) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") ) # cross-attention out projection rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") ) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") ) # MLP 1 rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", f"model.transformer_module.decoder.layers.{idx}.fc1.weight") ) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", f"model.transformer_module.decoder.layers.{idx}.fc1.bias") ) # MLP 2 rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", f"model.transformer_module.decoder.layers.{idx}.fc2.weight") ) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", f"model.transformer_module.decoder.layers.{idx}.fc2.bias") ) # layernorm 1 (self-attention layernorm) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") ) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") ) # layernorm 2 (cross-attention layernorm) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") ) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") ) # layernorm 3 (final layernorm) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") ) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") ) # heads on top rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") ) for i in range(3 ): rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.weight", f"mask_embedder.{i}.0.weight") ) 
rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.bias", f"mask_embedder.{i}.0.bias") ) # fmt: on return rename_keys def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = dct.pop(lowerCAmelCase ) _lowerCAmelCase = val def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _lowerCAmelCase = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _lowerCAmelCase = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" ) _lowerCAmelCase = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict _lowerCAmelCase = in_proj_weight[:dim, :] _lowerCAmelCase = in_proj_bias[: dim] _lowerCAmelCase = in_proj_weight[ dim : dim * 2, : ] _lowerCAmelCase = in_proj_bias[ dim : dim * 2 ] _lowerCAmelCase = in_proj_weight[ -dim :, : ] _lowerCAmelCase = in_proj_bias[-dim :] # fmt: on def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) _lowerCAmelCase = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" ) _lowerCAmelCase = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict _lowerCAmelCase = in_proj_weight[: hidden_size, :] _lowerCAmelCase = in_proj_bias[:config.hidden_size] _lowerCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :] _lowerCAmelCase = in_proj_bias[hidden_size : hidden_size * 2] _lowerCAmelCase = in_proj_weight[-hidden_size :, :] _lowerCAmelCase = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) _lowerCAmelCase = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" ) _lowerCAmelCase = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict _lowerCAmelCase = in_proj_weight[: hidden_size, :] _lowerCAmelCase = in_proj_bias[:config.hidden_size] _lowerCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :] _lowerCAmelCase = in_proj_bias[hidden_size : hidden_size * 2] _lowerCAmelCase = in_proj_weight[-hidden_size :, :] _lowerCAmelCase = in_proj_bias[-hidden_size :] # fmt: on def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _lowerCAmelCase = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw ) return im @torch.no_grad() def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = False ): """simple docstring""" _lowerCAmelCase = get_maskformer_config(lowerCAmelCase ) # load original state_dict with open(lowerCAmelCase , """rb""" ) as f: _lowerCAmelCase = pickle.load(lowerCAmelCase ) _lowerCAmelCase = 
data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys _lowerCAmelCase = create_rename_keys(lowerCAmelCase ) for src, dest in rename_keys: rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) read_in_swin_q_k_v(lowerCAmelCase , config.backbone_config ) read_in_decoder_q_k_v(lowerCAmelCase , lowerCAmelCase ) # update to torch tensors for key, value in state_dict.items(): _lowerCAmelCase = torch.from_numpy(lowerCAmelCase ) # load 🤗 model _lowerCAmelCase = MaskFormerForInstanceSegmentation(lowerCAmelCase ) model.eval() for name, param in model.named_parameters(): print(lowerCAmelCase , param.shape ) _lowerCAmelCase , _lowerCAmelCase = model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(lowerCAmelCase ) == 0, f"Unexpected keys: {unexpected_keys}" # verify results _lowerCAmelCase = prepare_img() if "vistas" in model_name: _lowerCAmelCase = 65 elif "cityscapes" in model_name: _lowerCAmelCase = 6_55_35 else: _lowerCAmelCase = 2_55 _lowerCAmelCase = True if """ade""" in model_name else False _lowerCAmelCase = MaskFormerImageProcessor(ignore_index=lowerCAmelCase , reduce_labels=lowerCAmelCase ) _lowerCAmelCase = image_processor(lowerCAmelCase , return_tensors="""pt""" ) _lowerCAmelCase = model(**lowerCAmelCase ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": _lowerCAmelCase = torch.tensor( [[3.6_353, -4.4_770, -2.6_065], [0.5_081, -4.2_394, -3.5_343], [2.1_909, -5.0_353, -1.9_323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f"Saving model and image processor to {pytorch_dump_folder_path}" ) Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) model.save_pretrained(lowerCAmelCase ) image_processor.save_pretrained(lowerCAmelCase ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(f"nielsr/{model_name}" ) image_processor.push_to_hub(f"nielsr/{model_name}" ) if __name__ == "__main__": A__ : str =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''maskformer-swin-tiny-ade''', type=str, help=('''Name of the MaskFormer model you\'d like to convert''',), ) parser.add_argument( '''--checkpoint_path''', default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''', type=str, help='''Path to the original state dict (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) A__ : List[Any] =parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
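# Toy illustration of the pop-and-reassign renaming the conversion above
# applies to every checkpoint key (the key and value here are made up).
toy_state_dict = {"backbone.patch_embed.proj.weight": "tensor-placeholder"}

def rename_key_demo(dct, old, new):
    dct[new] = dct.pop(old)  # move the value under its new key

rename_key_demo(
    toy_state_dict,
    "backbone.patch_embed.proj.weight",
    "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight",
)
assert "backbone.patch_embed.proj.weight" not in toy_state_dict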
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool A__ : List[str] ={ '''Acehnese Arabic''': '''ace_Arab''', '''Acehnese Latin''': '''ace_Latn''', '''Mesopotamian Arabic''': '''acm_Arab''', '''Ta\'izzi-Adeni Arabic''': '''acq_Arab''', '''Tunisian Arabic''': '''aeb_Arab''', '''Afrikaans''': '''afr_Latn''', '''South Levantine Arabic''': '''ajp_Arab''', '''Akan''': '''aka_Latn''', '''Amharic''': '''amh_Ethi''', '''North Levantine Arabic''': '''apc_Arab''', '''Modern Standard Arabic''': '''arb_Arab''', '''Modern Standard Arabic Romanized''': '''arb_Latn''', '''Najdi Arabic''': '''ars_Arab''', '''Moroccan Arabic''': '''ary_Arab''', '''Egyptian Arabic''': '''arz_Arab''', '''Assamese''': '''asm_Beng''', '''Asturian''': '''ast_Latn''', '''Awadhi''': '''awa_Deva''', '''Central Aymara''': '''ayr_Latn''', '''South Azerbaijani''': '''azb_Arab''', '''North Azerbaijani''': '''azj_Latn''', '''Bashkir''': '''bak_Cyrl''', '''Bambara''': '''bam_Latn''', '''Balinese''': '''ban_Latn''', '''Belarusian''': '''bel_Cyrl''', '''Bemba''': '''bem_Latn''', '''Bengali''': '''ben_Beng''', '''Bhojpuri''': '''bho_Deva''', '''Banjar Arabic''': '''bjn_Arab''', '''Banjar Latin''': '''bjn_Latn''', '''Standard Tibetan''': '''bod_Tibt''', '''Bosnian''': '''bos_Latn''', '''Buginese''': '''bug_Latn''', '''Bulgarian''': '''bul_Cyrl''', '''Catalan''': '''cat_Latn''', '''Cebuano''': '''ceb_Latn''', '''Czech''': '''ces_Latn''', '''Chokwe''': '''cjk_Latn''', '''Central Kurdish''': '''ckb_Arab''', '''Crimean Tatar''': '''crh_Latn''', '''Welsh''': '''cym_Latn''', '''Danish''': '''dan_Latn''', '''German''': '''deu_Latn''', '''Southwestern Dinka''': '''dik_Latn''', '''Dyula''': '''dyu_Latn''', '''Dzongkha''': '''dzo_Tibt''', '''Greek''': '''ell_Grek''', '''English''': '''eng_Latn''', '''Esperanto''': '''epo_Latn''', '''Estonian''': '''est_Latn''', '''Basque''': '''eus_Latn''', '''Ewe''': '''ewe_Latn''', '''Faroese''': '''fao_Latn''', '''Fijian''': '''fij_Latn''', '''Finnish''': '''fin_Latn''', '''Fon''': '''fon_Latn''', '''French''': '''fra_Latn''', '''Friulian''': '''fur_Latn''', '''Nigerian Fulfulde''': '''fuv_Latn''', '''Scottish Gaelic''': '''gla_Latn''', '''Irish''': '''gle_Latn''', '''Galician''': '''glg_Latn''', '''Guarani''': '''grn_Latn''', '''Gujarati''': '''guj_Gujr''', '''Haitian Creole''': '''hat_Latn''', '''Hausa''': '''hau_Latn''', '''Hebrew''': '''heb_Hebr''', '''Hindi''': '''hin_Deva''', '''Chhattisgarhi''': '''hne_Deva''', '''Croatian''': '''hrv_Latn''', '''Hungarian''': '''hun_Latn''', '''Armenian''': '''hye_Armn''', '''Igbo''': '''ibo_Latn''', '''Ilocano''': '''ilo_Latn''', '''Indonesian''': '''ind_Latn''', '''Icelandic''': '''isl_Latn''', '''Italian''': '''ita_Latn''', '''Javanese''': '''jav_Latn''', '''Japanese''': '''jpn_Jpan''', '''Kabyle''': '''kab_Latn''', '''Jingpho''': '''kac_Latn''', '''Kamba''': '''kam_Latn''', '''Kannada''': '''kan_Knda''', 
'''Kashmiri Arabic''': '''kas_Arab''', '''Kashmiri Devanagari''': '''kas_Deva''', '''Georgian''': '''kat_Geor''', '''Central Kanuri Arabic''': '''knc_Arab''', '''Central Kanuri Latin''': '''knc_Latn''', '''Kazakh''': '''kaz_Cyrl''', '''Kabiyè''': '''kbp_Latn''', '''Kabuverdianu''': '''kea_Latn''', '''Khmer''': '''khm_Khmr''', '''Kikuyu''': '''kik_Latn''', '''Kinyarwanda''': '''kin_Latn''', '''Kyrgyz''': '''kir_Cyrl''', '''Kimbundu''': '''kmb_Latn''', '''Northern Kurdish''': '''kmr_Latn''', '''Kikongo''': '''kon_Latn''', '''Korean''': '''kor_Hang''', '''Lao''': '''lao_Laoo''', '''Ligurian''': '''lij_Latn''', '''Limburgish''': '''lim_Latn''', '''Lingala''': '''lin_Latn''', '''Lithuanian''': '''lit_Latn''', '''Lombard''': '''lmo_Latn''', '''Latgalian''': '''ltg_Latn''', '''Luxembourgish''': '''ltz_Latn''', '''Luba-Kasai''': '''lua_Latn''', '''Ganda''': '''lug_Latn''', '''Luo''': '''luo_Latn''', '''Mizo''': '''lus_Latn''', '''Standard Latvian''': '''lvs_Latn''', '''Magahi''': '''mag_Deva''', '''Maithili''': '''mai_Deva''', '''Malayalam''': '''mal_Mlym''', '''Marathi''': '''mar_Deva''', '''Minangkabau Arabic ''': '''min_Arab''', '''Minangkabau Latin''': '''min_Latn''', '''Macedonian''': '''mkd_Cyrl''', '''Plateau Malagasy''': '''plt_Latn''', '''Maltese''': '''mlt_Latn''', '''Meitei Bengali''': '''mni_Beng''', '''Halh Mongolian''': '''khk_Cyrl''', '''Mossi''': '''mos_Latn''', '''Maori''': '''mri_Latn''', '''Burmese''': '''mya_Mymr''', '''Dutch''': '''nld_Latn''', '''Norwegian Nynorsk''': '''nno_Latn''', '''Norwegian Bokmål''': '''nob_Latn''', '''Nepali''': '''npi_Deva''', '''Northern Sotho''': '''nso_Latn''', '''Nuer''': '''nus_Latn''', '''Nyanja''': '''nya_Latn''', '''Occitan''': '''oci_Latn''', '''West Central Oromo''': '''gaz_Latn''', '''Odia''': '''ory_Orya''', '''Pangasinan''': '''pag_Latn''', '''Eastern Panjabi''': '''pan_Guru''', '''Papiamento''': '''pap_Latn''', '''Western Persian''': '''pes_Arab''', '''Polish''': '''pol_Latn''', '''Portuguese''': '''por_Latn''', '''Dari''': '''prs_Arab''', '''Southern Pashto''': '''pbt_Arab''', '''Ayacucho Quechua''': '''quy_Latn''', '''Romanian''': '''ron_Latn''', '''Rundi''': '''run_Latn''', '''Russian''': '''rus_Cyrl''', '''Sango''': '''sag_Latn''', '''Sanskrit''': '''san_Deva''', '''Santali''': '''sat_Olck''', '''Sicilian''': '''scn_Latn''', '''Shan''': '''shn_Mymr''', '''Sinhala''': '''sin_Sinh''', '''Slovak''': '''slk_Latn''', '''Slovenian''': '''slv_Latn''', '''Samoan''': '''smo_Latn''', '''Shona''': '''sna_Latn''', '''Sindhi''': '''snd_Arab''', '''Somali''': '''som_Latn''', '''Southern Sotho''': '''sot_Latn''', '''Spanish''': '''spa_Latn''', '''Tosk Albanian''': '''als_Latn''', '''Sardinian''': '''srd_Latn''', '''Serbian''': '''srp_Cyrl''', '''Swati''': '''ssw_Latn''', '''Sundanese''': '''sun_Latn''', '''Swedish''': '''swe_Latn''', '''Swahili''': '''swh_Latn''', '''Silesian''': '''szl_Latn''', '''Tamil''': '''tam_Taml''', '''Tatar''': '''tat_Cyrl''', '''Telugu''': '''tel_Telu''', '''Tajik''': '''tgk_Cyrl''', '''Tagalog''': '''tgl_Latn''', '''Thai''': '''tha_Thai''', '''Tigrinya''': '''tir_Ethi''', '''Tamasheq Latin''': '''taq_Latn''', '''Tamasheq Tifinagh''': '''taq_Tfng''', '''Tok Pisin''': '''tpi_Latn''', '''Tswana''': '''tsn_Latn''', '''Tsonga''': '''tso_Latn''', '''Turkmen''': '''tuk_Latn''', '''Tumbuka''': '''tum_Latn''', '''Turkish''': '''tur_Latn''', '''Twi''': '''twi_Latn''', '''Central Atlas Tamazight''': '''tzm_Tfng''', '''Uyghur''': '''uig_Arab''', '''Ukrainian''': '''ukr_Cyrl''', '''Umbundu''': '''umb_Latn''', '''Urdu''': 
'''urd_Arab''', '''Northern Uzbek''': '''uzn_Latn''', '''Venetian''': '''vec_Latn''', '''Vietnamese''': '''vie_Latn''', '''Waray''': '''war_Latn''', '''Wolof''': '''wol_Latn''', '''Xhosa''': '''xho_Latn''', '''Eastern Yiddish''': '''ydd_Hebr''', '''Yoruba''': '''yor_Latn''', '''Yue Chinese''': '''yue_Hant''', '''Chinese Simplified''': '''zho_Hans''', '''Chinese Traditional''': '''zho_Hant''', '''Standard Malay''': '''zsm_Latn''', '''Zulu''': '''zul_Latn''', } class UpperCAmelCase ( snake_case_ ): _lowercase: Dict = '''facebook/nllb-200-distilled-600M''' _lowercase: int = ( '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ''' '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ''' '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ''' '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.''' ) _lowercase: Any = '''translator''' _lowercase: Optional[int] = AutoTokenizer _lowercase: str = AutoModelForSeqaSeqLM _lowercase: List[Any] = LANGUAGE_CODES _lowercase: Tuple = ['''text''', '''text''', '''text'''] _lowercase: List[str] = ['''text'''] def lowercase__ ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Dict ) -> Optional[Any]: if src_lang not in self.lang_to_code: raise ValueError(f"{src_lang} is not a supported language." ) if tgt_lang not in self.lang_to_code: raise ValueError(f"{tgt_lang} is not a supported language." ) _lowerCAmelCase = self.lang_to_code[src_lang] _lowerCAmelCase = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( __snake_case , return_tensors="""pt""" , src_lang=__snake_case , tgt_lang=__snake_case ) def lowercase__ ( self : Optional[int] , __snake_case : Any ) -> List[str]: return self.model.generate(**__snake_case ) def lowercase__ ( self : Dict , __snake_case : List[Any] ) -> Any: return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__snake_case )
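# Standalone sketch of the language-name -> NLLB-200 code lookup the tool
# performs before translating; only a two-entry excerpt of the table above.
CODES_EXCERPT = {"English": "eng_Latn", "Romanian": "ron_Latn"}

src_lang, tgt_lang = "English", "Romanian"
for lang in (src_lang, tgt_lang):
    if lang not in CODES_EXCERPT:
        raise ValueError(f"{lang} is not a supported language.")
print(CODES_EXCERPT[src_lang], "->", CODES_EXCERPT[tgt_lang])  # eng_Latn -> ron_Latn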
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self: int , snake_case: int , snake_case: Tuple=7 , snake_case: Optional[int]=3 , snake_case: List[Any]=18 , snake_case: Dict=30 , snake_case: Optional[int]=400 , snake_case: int=True , snake_case: Dict=None , snake_case: Dict=True , snake_case: Tuple=None , snake_case: Optional[Any]=True , snake_case: Dict=[0.5, 0.5, 0.5] , snake_case: Union[str, Any]=[0.5, 0.5, 0.5] , ) -> int: snake_case_ :Tuple = size if size is not None else {"""shortest_edge""": 18} snake_case_ :Dict = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} snake_case_ :str = parent snake_case_ :int = batch_size snake_case_ :Tuple = num_channels snake_case_ :Any = image_size snake_case_ :int = min_resolution snake_case_ :Dict = max_resolution snake_case_ :Any = do_resize snake_case_ :Optional[int] = size snake_case_ :Optional[int] = do_center_crop snake_case_ :Dict = crop_size snake_case_ :int = do_normalize snake_case_ :Optional[Any] = image_mean snake_case_ :Optional[Any] = image_std def lowerCAmelCase_ ( self: List[Any] ) -> str: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class lowerCamelCase ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : List[str] = LevitImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self: List[Any] ) -> str: snake_case_ :List[str] = LevitImageProcessingTester(self ) @property def lowerCAmelCase_ ( self: str ) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self: List[Any] ) -> Any: snake_case_ :Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case , """image_mean""" ) ) self.assertTrue(hasattr(snake_case , """image_std""" ) ) self.assertTrue(hasattr(snake_case , """do_normalize""" ) ) self.assertTrue(hasattr(snake_case , """do_resize""" ) ) self.assertTrue(hasattr(snake_case , """do_center_crop""" ) ) self.assertTrue(hasattr(snake_case , """size""" ) ) def lowerCAmelCase_ ( self: Dict ) -> List[str]: snake_case_ :List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) snake_case_ :Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCAmelCase_ ( self: Optional[int] ) -> List[str]: pass def lowerCAmelCase_ ( self: int ) -> Optional[Any]: # Initialize image_processing snake_case_ :str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ :List[str] = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , Image.Image ) # Test not batched input snake_case_ :int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched snake_case_ :Optional[Any] = image_processing(snake_case , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]: # Initialize image_processing snake_case_ :List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ :Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , np.ndarray ) # Test not batched input snake_case_ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched snake_case_ :List[str] = image_processing(snake_case , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self: List[str] ) -> int: # Initialize image_processing snake_case_ :Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , torch.Tensor ) # Test not batched input snake_case_ :Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched snake_case_ :Optional[int] = image_processing(snake_case , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
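# Pillow-only sketch of the transform these tests exercise: resize so the
# shortest edge is 18, then center-crop to 18x18 (the tester defaults).
# The real LevitImageProcessor handles normalization and batching as well.
from PIL import Image

image = Image.new("RGB", (40, 30))
shortest_edge = crop = 18

scale = shortest_edge / min(image.size)
resized = image.resize((round(image.width * scale), round(image.height * scale)))
left = (resized.width - crop) // 2
top = (resized.height - crop) // 2
assert resized.crop((left, top, left + crop, top + crop)).size == (18, 18)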
"""simple docstring""" import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py __a = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. __a = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. __a = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") __a = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. __a = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Fill this with tuples (pipeline_tag, model_mapping, auto_model) __a = [ ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"), ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"), ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"), ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"), ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"), ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"), ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"), ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"), ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"), ( "zero-shot-object-detection", "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForZeroShotObjectDetection", ), ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"), ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"), ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"), ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"), ( "table-question-answering", "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForTableQuestionAnswering", ), ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"), ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"), ( "next-sentence-prediction", "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES", "AutoModelForNextSentencePrediction", ), ( "audio-frame-classification", "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioFrameClassification", ), ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"), ( "document-question-answering", "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForDocumentQuestionAnswering", ), ( "visual-question-answering", "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForVisualQuestionAnswering", ), ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"), ( "zero-shot-image-classification", "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForZeroShotImageClassification", ), ("depth-estimation", 
"MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"), ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"), ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"), ] def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Any = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""", _lowercase ) return [m.group(0 ) for m in matches] def A_ ( ): '''simple docstring''' snake_case_ :int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES snake_case_ :Dict = { config.replace("""Config""", """""" ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. snake_case_ :Optional[Any] = collections.defaultdict(_lowercase ) snake_case_ :int = collections.defaultdict(_lowercase ) snake_case_ :List[str] = collections.defaultdict(_lowercase ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(_lowercase ): snake_case_ :int = None if _re_tf_models.match(_lowercase ) is not None: snake_case_ :int = tf_models snake_case_ :List[str] = _re_tf_models.match(_lowercase ).groups()[0] elif _re_flax_models.match(_lowercase ) is not None: snake_case_ :List[Any] = flax_models snake_case_ :Any = _re_flax_models.match(_lowercase ).groups()[0] elif _re_pt_models.match(_lowercase ) is not None: snake_case_ :Optional[Any] = pt_models snake_case_ :int = _re_pt_models.match(_lowercase ).groups()[0] if lookup_dict is not None: while len(_lowercase ) > 0: if attr_name in model_prefix_to_model_type: snake_case_ :Optional[int] = True break # Try again after removing the last word in the name snake_case_ :Optional[Any] = """""".join(camel_case_split(_lowercase )[:-1] ) snake_case_ :Optional[int] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) snake_case_ :Optional[Any] = list(_lowercase ) all_models.sort() snake_case_ :Optional[int] = {"""model_type""": all_models} snake_case_ :Optional[int] = [pt_models[t] for t in all_models] snake_case_ :Any = [tf_models[t] for t in all_models] snake_case_ :Dict = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure snake_case_ :Dict = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: snake_case_ :Optional[Any] = """AutoProcessor""" elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: snake_case_ :Tuple = """AutoTokenizer""" elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: snake_case_ :Tuple = """AutoFeatureExtractor""" else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
snake_case_ :str = """AutoTokenizer""" snake_case_ :int = [processors[t] for t in all_models] return pd.DataFrame(_lowercase ) def A_ ( _lowercase ): '''simple docstring''' snake_case_ :List[Any] = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: snake_case_ :Optional[int] = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""] snake_case_ :List[str] = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""] # Loop through all three frameworks for module, cls, mapping in zip(_lowercase, _lowercase, _lowercase ): # The type of pipeline may not exist in this framework if not hasattr(_lowercase, _lowercase ): continue # First extract all model_names snake_case_ :Tuple = [] for name in getattr(_lowercase, _lowercase ).values(): if isinstance(_lowercase, _lowercase ): model_names.append(_lowercase ) else: model_names.extend(list(_lowercase ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :List[Any] = get_frameworks_table() snake_case_ :str = Dataset.from_pandas(_lowercase ) snake_case_ :List[Any] = hf_hub_download( """huggingface/transformers-metadata""", """pipeline_tags.json""", repo_type="""dataset""", token=_lowercase ) snake_case_ :List[str] = Dataset.from_json(_lowercase ) snake_case_ :int = { tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""]) for i in range(len(_lowercase ) ) } snake_case_ :Optional[int] = update_pipeline_and_auto_class_table(_lowercase ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. snake_case_ :Tuple = sorted(table.keys() ) snake_case_ :Tuple = pd.DataFrame( { """model_class""": model_classes, """pipeline_tag""": [table[m][0] for m in model_classes], """auto_class""": [table[m][1] for m in model_classes], } ) snake_case_ :Union[str, Any] = Dataset.from_pandas(_lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(_lowercase, """frameworks.json""" ) ) tags_dataset.to_json(os.path.join(_lowercase, """pipeline_tags.json""" ) ) if commit_sha is not None: snake_case_ :Union[str, Any] = ( f"""Update with commit {commit_sha}\n\nSee: """ f"""https://github.com/huggingface/transformers/commit/{commit_sha}""" ) else: snake_case_ :List[Any] = """Update""" upload_folder( repo_id="""huggingface/transformers-metadata""", folder_path=_lowercase, repo_type="""dataset""", token=_lowercase, commit_message=_lowercase, ) def A_ ( ): '''simple docstring''' snake_case_ :List[Any] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} snake_case_ :Dict = transformers_module.pipelines.SUPPORTED_TASKS snake_case_ :List[str] = [] for key in pipeline_tasks: if key not in in_table: snake_case_ :int = pipeline_tasks[key]["""pt"""] if isinstance(_lowercase, (list, tuple) ): snake_case_ :Any = model[0] snake_case_ :str = model.__name__ if model not in in_table.values(): missing.append(_lowercase ) if len(_lowercase ) > 0: snake_case_ :Optional[int] = """, """.join(_lowercase ) raise ValueError( """The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """ f"""`utils/update_metadata.py`: {msg}. 
Please add them!""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.") parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.") parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.") __a = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
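# Quick standalone check of the CamelCase splitter regex used above to map
# model class names back to their model-type prefixes:
import re

def camel_case_split_demo(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]

assert camel_case_split_demo("TFBertForQuestionAnswering") == ["TF", "Bert", "For", "Question", "Answering"]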
import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class lowercase__ ( snake_case__, unittest.TestCase ): _UpperCAmelCase :Optional[Any] = BertTokenizer _UpperCAmelCase :Dict = BertTokenizerFast _UpperCAmelCase :str = True _UpperCAmelCase :Union[str, Any] = True _UpperCAmelCase :Any = filter_non_english def UpperCAmelCase__ ( self : Optional[int] ): super().setUp() lowerCamelCase_ : Any =[ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowerCamelCase_ : str =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Any ): lowerCamelCase_ : List[Any] ="UNwant\u00E9d,running" lowerCamelCase_ : Union[str, Any] ="unwanted, running" return input_text, output_text def UpperCAmelCase__ ( self : Union[str, Any] ): lowerCamelCase_ : int =self.tokenizer_class(self.vocab_file ) lowerCamelCase_ : Optional[int] =tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(snake_case__ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [9, 6, 7, 12, 10, 11] ) def UpperCAmelCase__ ( self : Optional[Any] ): if not self.test_rust_tokenizer: return lowerCamelCase_ : List[Any] =self.get_tokenizer() lowerCamelCase_ : Optional[int] =self.get_rust_tokenizer() lowerCamelCase_ : int ="UNwant\u00E9d,running" lowerCamelCase_ : Union[str, Any] =tokenizer.tokenize(snake_case__ ) lowerCamelCase_ : List[Any] =rust_tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) lowerCamelCase_ : Optional[Any] =tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) lowerCamelCase_ : Dict =rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) lowerCamelCase_ : Optional[int] =self.get_rust_tokenizer() lowerCamelCase_ : Dict =tokenizer.encode(snake_case__ ) lowerCamelCase_ : Union[str, Any] =rust_tokenizer.encode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) # With lower casing lowerCamelCase_ : str =self.get_tokenizer(do_lower_case=snake_case__ ) lowerCamelCase_ : Union[str, Any] =self.get_rust_tokenizer(do_lower_case=snake_case__ ) lowerCamelCase_ : List[Any] ="UNwant\u00E9d,running" lowerCamelCase_ : Optional[Any] =tokenizer.tokenize(snake_case__ ) lowerCamelCase_ : int =rust_tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) lowerCamelCase_ : Union[str, Any] =tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) lowerCamelCase_ : List[str] =rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) lowerCamelCase_ : List[Any] =self.get_rust_tokenizer() lowerCamelCase_ : str =tokenizer.encode(snake_case__ ) lowerCamelCase_ : Dict =rust_tokenizer.encode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) def UpperCAmelCase__ ( self : List[Any] ): lowerCamelCase_ : Union[str, Any] 
=BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def UpperCAmelCase__ ( self : Optional[Any] ): lowerCamelCase_ : Optional[int] =BasicTokenizer(do_lower_case=snake_case__ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase__ ( self : int ): lowerCamelCase_ : Dict =BasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def UpperCAmelCase__ ( self : List[Any] ): lowerCamelCase_ : Any =BasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase__ ( self : List[Any] ): lowerCamelCase_ : Optional[Any] =BasicTokenizer(do_lower_case=snake_case__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase__ ( self : List[str] ): lowerCamelCase_ : Tuple =BasicTokenizer(do_lower_case=snake_case__ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase__ ( self : List[Any] ): lowerCamelCase_ : Union[str, Any] =BasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase__ ( self : Tuple ): lowerCamelCase_ : List[Any] =BasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase__ ( self : Union[str, Any] ): lowerCamelCase_ : List[str] =BasicTokenizer(do_lower_case=snake_case__ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def UpperCAmelCase__ ( self : Union[str, Any] ): lowerCamelCase_ : Dict =BasicTokenizer() lowerCamelCase_ : int ="a\n'll !!to?'d of, can't." 
lowerCamelCase_ : Tuple =["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."] self.assertListEqual(tokenizer.tokenize(snake_case__ ) , snake_case__ ) def UpperCAmelCase__ ( self : Optional[Any] ): lowerCamelCase_ : Dict =["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] lowerCamelCase_ : List[str] ={} for i, token in enumerate(snake_case__ ): lowerCamelCase_ : Dict =i lowerCamelCase_ : List[Any] =WordpieceTokenizer(vocab=snake_case__ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def UpperCAmelCase__ ( self : List[Any] ): self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def UpperCAmelCase__ ( self : Union[str, Any] ): self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def UpperCAmelCase__ ( self : Optional[int] ): self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def UpperCAmelCase__ ( self : Tuple ): lowerCamelCase_ : Any =self.get_tokenizer() lowerCamelCase_ : Optional[int] =self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(snake_case__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(snake_case__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def UpperCAmelCase__ ( self : List[Any] ): lowerCamelCase_ : str =self.tokenizer_class.from_pretrained("bert-base-uncased" ) lowerCamelCase_ : List[str] =tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ ) lowerCamelCase_ : List[Any] =tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ ) lowerCamelCase_ : int =tokenizer.build_inputs_with_special_tokens(snake_case__ ) lowerCamelCase_ : Dict =tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def UpperCAmelCase__ ( self : List[str] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCamelCase_ : List[str] =self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) lowerCamelCase_ : List[Any] =F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" lowerCamelCase_ : int =tokenizer_r.encode_plus( snake_case__ , return_attention_mask=snake_case__ , return_token_type_ids=snake_case__ , return_offsets_mapping=snake_case__ , add_special_tokens=snake_case__ , ) lowerCamelCase_ : List[str] =tokenizer_r.do_lower_case if hasattr(snake_case__ , "do_lower_case" ) else False lowerCamelCase_ : Optional[int] =( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), 
"##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def UpperCAmelCase__ ( self : Union[str, Any] ): lowerCamelCase_ : str =["的", "人", "有"] lowerCamelCase_ : str ="".join(snake_case__ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCamelCase_ : Optional[int] =True lowerCamelCase_ : Any =self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) lowerCamelCase_ : Optional[Any] =self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) lowerCamelCase_ : Optional[Any] =tokenizer_p.encode(snake_case__ , add_special_tokens=snake_case__ ) lowerCamelCase_ : List[Any] =tokenizer_r.encode(snake_case__ , add_special_tokens=snake_case__ ) lowerCamelCase_ : Union[str, Any] =tokenizer_r.convert_ids_to_tokens(snake_case__ ) lowerCamelCase_ : List[str] =tokenizer_p.convert_ids_to_tokens(snake_case__ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(snake_case__ , snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) lowerCamelCase_ : List[str] =False lowerCamelCase_ : Tuple =self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) lowerCamelCase_ : Union[str, Any] =self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) lowerCamelCase_ : List[Any] =tokenizer_r.encode(snake_case__ , add_special_tokens=snake_case__ ) lowerCamelCase_ : Tuple =tokenizer_p.encode(snake_case__ , add_special_tokens=snake_case__ ) lowerCamelCase_ : int =tokenizer_r.convert_ids_to_tokens(snake_case__ ) lowerCamelCase_ : List[str] =tokenizer_p.convert_ids_to_tokens(snake_case__ ) # it is expected that only the first Chinese character is not preceded by "##". lowerCamelCase_ : Tuple =[ F"""##{token}""" if idx != 0 else token for idx, token in enumerate(snake_case__ ) ] self.assertListEqual(snake_case__ , snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ )
"""simple docstring""" import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def _snake_case ( lowerCamelCase__ : Any ) -> Union[str, Any]: # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def _snake_case ( ) -> List[Any]: with parallel_backend("spark" ): assert ParallelBackendConfig.backend_name == "spark" lowerCamelCase_ : Optional[Any] =[1, 2, 3] with pytest.raises(lowerCamelCase__ ): with parallel_backend("unsupported backend" ): map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=2 ) with pytest.raises(lowerCamelCase__ ): with parallel_backend("unsupported backend" ): map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("num_proc" , [2, -1] ) def _snake_case ( lowerCamelCase__ : Tuple ) -> Optional[Any]: lowerCamelCase_ : str =[1, 2] lowerCamelCase_ : List[str] ={"a": 1, "b": 2} lowerCamelCase_ : List[str] ={"a": [1, 2], "b": [3, 4]} lowerCamelCase_ : Optional[int] ={"a": {"1": 1}, "b": 2} lowerCamelCase_ : int ={"a": 1, "b": 2, "c": 3, "d": 4} lowerCamelCase_ : Optional[int] =[2, 3] lowerCamelCase_ : List[Any] ={"a": 2, "b": 3} lowerCamelCase_ : int ={"a": [2, 3], "b": [4, 5]} lowerCamelCase_ : str ={"a": {"1": 2}, "b": 3} lowerCamelCase_ : Dict ={"a": 2, "b": 3, "c": 4, "d": 5} with parallel_backend("spark" ): assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa
209
0
"""simple docstring""" from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def __lowerCamelCase ( __UpperCamelCase ) -> int: """simple docstring""" return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) lowercase__ = ''' transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. ''' class __lowerCamelCase ( a__ ): '''simple docstring''' @staticmethod def lowerCamelCase ( a_ : List[Any] ): lowerCAmelCase_ : Optional[Any] = parser.add_parser( "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , ) train_parser.add_argument("--model_type" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="Model's type." ) train_parser.add_argument( "--tf_checkpoint" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="TensorFlow checkpoint path or folder." ) train_parser.add_argument( "--pytorch_dump_output" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="Path to the PyTorch saved model output." ) train_parser.add_argument("--config" , type=lowerCAmelCase__ , default="" , help="Configuration file path or folder." ) train_parser.add_argument( "--finetuning_task_name" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help="Optional fine-tuning task name if the TF model was a finetuned model." , ) train_parser.set_defaults(func=lowerCAmelCase__ ) def __init__( self : Tuple , a_ : Tuple , a_ : List[Any] , a_ : Optional[int] , a_ : List[Any] , a_ : Optional[int] , *a_ : Optional[Any] , ): lowerCAmelCase_ : Optional[Any] = logging.get_logger("transformers-cli/converting" ) self._logger.info(f'''Loading model {model_type}''' ) lowerCAmelCase_ : int = model_type lowerCAmelCase_ : str = tf_checkpoint lowerCAmelCase_ : Union[str, Any] = pytorch_dump_output lowerCAmelCase_ : Dict = config lowerCAmelCase_ : Tuple = finetuning_task_name def lowerCamelCase ( self : List[Any] ): if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase__ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase__ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase__ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(lowerCAmelCase__ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) 
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase__ ) if "ckpt" in self._tf_checkpoint.lower(): lowerCAmelCase_ : List[Any] = self._tf_checkpoint lowerCAmelCase_ : str = "" else: lowerCAmelCase_ : Optional[Any] = self._tf_checkpoint lowerCAmelCase_ : Optional[Any] = "" convert_transfo_xl_checkpoint_to_pytorch( lowerCAmelCase__ , self._config , self._pytorch_dump_output , lowerCAmelCase__ ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase__ ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase__ ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
241
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float ) ->float: '''simple docstring''' return price * (1 + tax_rate) if __name__ == "__main__": print(F'''{price_plus_tax(100, 0.25) = }''') print(F'''{price_plus_tax(125.50, 0.05) = }''')
105
0
from __future__ import annotations

ELECTRON_CHARGE = 1.6021e-19  # units = C


def electrical_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Given any two of conductivity, electron concentration and mobility,
    compute the missing quantity (pass 0 for the unknown one)."""
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
177
import numpy as np


def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Element-wise hyperbolic tangent activation: tanh(x) = 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
177
1
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to `precision` digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
141
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCAmelCase ( A , unittest.TestCase ): lowerCAmelCase_ = KandinskyVaaImgaImgPipeline lowerCAmelCase_ = ["image_embeds", "negative_image_embeds", "image"] lowerCAmelCase_ = [ "image_embeds", "negative_image_embeds", "image", ] lowerCAmelCase_ = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] lowerCAmelCase_ = False @property def snake_case ( self : List[str] ): """simple docstring""" return 32 @property def snake_case ( self : Any ): """simple docstring""" return 32 @property def snake_case ( self : List[str] ): """simple docstring""" return self.time_input_dim @property def snake_case ( self : str ): """simple docstring""" return self.time_input_dim * 4 @property def snake_case ( self : Union[str, Any] ): """simple docstring""" return 100 @property def snake_case ( self : str ): """simple docstring""" torch.manual_seed(0 ) __lowercase ={ 'in_channels': 4, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __lowercase =UNetaDConditionModel(**__lowercase ) return model @property def snake_case ( self : Any ): """simple docstring""" return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def snake_case ( self : str ): """simple docstring""" torch.manual_seed(0 ) __lowercase =VQModel(**self.dummy_movq_kwargs ) return model def snake_case ( self : Tuple ): """simple docstring""" __lowercase =self.dummy_unet __lowercase =self.dummy_movq __lowercase ={ 'num_train_timesteps': 1000, 'beta_schedule': 'linear', 'beta_start': 0.0_0_0_8_5, 'beta_end': 0.0_1_2, 'clip_sample': False, 'set_alpha_to_one': False, 'steps_offset': 0, 'prediction_type': 'epsilon', 'thresholding': False, } __lowercase =DDIMScheduler(**__lowercase ) __lowercase ={ 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def snake_case ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : int=0 ): """simple docstring""" __lowercase =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase ) __lowercase 
=floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowercase ) # create init_image __lowercase =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase ) __lowercase =image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowercase =Image.fromarray(np.uinta(__lowercase ) ).convert('RGB' ).resize((256, 256) ) if str(__lowercase ).startswith('mps' ): __lowercase =torch.manual_seed(__lowercase ) else: __lowercase =torch.Generator(device=__lowercase ).manual_seed(__lowercase ) __lowercase ={ 'image': init_image, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 10, 'guidance_scale': 7.0, 'strength': 0.2, 'output_type': 'np', } return inputs def snake_case ( self : List[str] ): """simple docstring""" __lowercase ='cpu' __lowercase =self.get_dummy_components() __lowercase =self.pipeline_class(**__lowercase ) __lowercase =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowercase =pipe(**self.get_dummy_inputs(__lowercase ) ) __lowercase =output.images __lowercase =pipe( **self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0] __lowercase =image[0, -3:, -3:, -1] __lowercase =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowercase =np.array( [0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Optional[int] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self : Any ): """simple docstring""" __lowercase =load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_img2img_frog.npy' ) __lowercase =load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) __lowercase ='A red cartoon frog, 4k' __lowercase =KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(__lowercase ) __lowercase =KandinskyVaaImgaImgPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa ) __lowercase =pipeline.to(__lowercase ) pipeline.set_progress_bar_config(disable=__lowercase ) __lowercase =torch.Generator(device='cpu' ).manual_seed(0 ) __lowercase , __lowercase =pipe_prior( __lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt='' , ).to_tuple() __lowercase =pipeline( image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , ) __lowercase =output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowercase , __lowercase )
141
1
from .testing import (
    are_the_same_tensors,
    execute_subprocess_async,
    require_bnb,
    require_cpu,
    require_cuda,
    require_huggingface_suite,
    require_mps,
    require_multi_gpu,
    require_multi_xpu,
    require_safetensors,
    require_single_gpu,
    require_single_xpu,
    require_torch_min_version,
    require_tpu,
    require_xpu,
    skip,
    slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU

from .scripts import test_script, test_sync, test_ops  # isort: skip
148
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Find the denominator below `digit` whose unit fraction has the longest
    recurring cycle in its decimal expansion (Project Euler style)."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
148
1
"""simple docstring""" from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class lowerCAmelCase__ : '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = None __UpperCamelCase = None UpperCAmelCase_ : Any = namedtuple("""CoinsDistribResult""", """moves excess""") def _A (__a ) -> Union[str, Any]: """simple docstring""" if root is None: return 0 # Validation def count_nodes(__a ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(__a ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(__lowerCamelCase ) != count_coins(__lowerCamelCase ): raise ValueError('''The nodes number should be same as the number of coins''' ) # Main calculation def get_distrib(__a ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) SCREAMING_SNAKE_CASE_ : List[str] = get_distrib(node.left ) SCREAMING_SNAKE_CASE_ : List[str] = get_distrib(node.right ) SCREAMING_SNAKE_CASE_ : List[str] = 1 - left_distrib_excess SCREAMING_SNAKE_CASE_ : int = 1 - right_distrib_excess SCREAMING_SNAKE_CASE_ : List[str] = ( left_distrib_moves + right_distrib_moves + abs(__lowerCamelCase ) + abs(__lowerCamelCase ) ) SCREAMING_SNAKE_CASE_ : Tuple = node.data - coins_to_left - coins_to_right return CoinsDistribResult(__lowerCamelCase , __lowerCamelCase ) return get_distrib(__lowerCamelCase )[0] if __name__ == "__main__": import doctest doctest.testmod()
91
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def UpperCamelCase ( __lowerCamelCase : Dataset , __lowerCamelCase : Dict[str, str] ): snake_case : int = args.log_outputs snake_case : Dict = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric snake_case : List[str] = load_metric("wer" ) snake_case : Tuple = load_metric("cer" ) # compute metrics snake_case : List[Any] = wer.compute(references=result["target"] , predictions=result["prediction"] ) snake_case : int = cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results snake_case : int = f"""WER: {wer_result}\nCER: {cer_result}""" print(__lowerCamelCase ) with open(f"""{dataset_id}_eval_results.txt""" , "w" ) as f: f.write(__lowerCamelCase ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: snake_case : int = f"""log_{dataset_id}_predictions.txt""" snake_case : List[Any] = f"""log_{dataset_id}_targets.txt""" with open(__lowerCamelCase , "w" ) as p, open(__lowerCamelCase , "w" ) as t: # mapping function to write output def write_to_file(__lowerCamelCase : str , __lowerCamelCase : Optional[int] ): p.write(f"""{i}""" + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(f"""{i}""" + "\n" ) t.write(batch["target"] + "\n" ) result.map(__lowerCamelCase , with_indices=__lowerCamelCase ) def UpperCamelCase ( __lowerCamelCase : str ): snake_case : List[Any] = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training snake_case : List[Any] = re.sub(__lowerCamelCase , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! snake_case : Optional[Any] = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: snake_case : Dict = " ".join(text.split(__lowerCamelCase ) ) return text def UpperCamelCase ( __lowerCamelCase : int ): # load dataset snake_case : str = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__lowerCamelCase ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor snake_case : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id ) snake_case : Union[str, Any] = feature_extractor.sampling_rate # resample audio snake_case : Union[str, Any] = dataset.cast_column("audio" , Audio(sampling_rate=__lowerCamelCase ) ) # load eval pipeline if args.device is None: snake_case : List[str] = 0 if torch.cuda.is_available() else -1 snake_case : str = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(__lowerCamelCase : int ): snake_case : Dict = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) snake_case : str = prediction["text"] snake_case : Tuple = normalize_text(batch["sentence"] ) return batch # run inference on all examples snake_case : Dict = dataset.map(__lowerCamelCase , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. 
Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) __lowerCamelCase = parser.parse_args() main(args)
59
0
"""simple docstring""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase: str = logging.get_logger(__name__) def lowercase__ ( _UpperCAmelCase ) -> Tuple: '''simple docstring''' print('Loading config file...' ) def flatten_yaml_as_dict(_UpperCAmelCase , _UpperCAmelCase="" , _UpperCAmelCase="." ): lowercase : Union[str, Any] = [] for k, v in d.items(): lowercase : Optional[int] = parent_key + sep + k if parent_key else k if isinstance(_UpperCAmelCase , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(_UpperCAmelCase , _UpperCAmelCase , sep=_UpperCAmelCase ).items() ) else: items.append((new_key, v) ) return dict(_UpperCAmelCase ) lowercase : Union[str, Any] = argparse.Namespace() with open(_UpperCAmelCase , 'r' ) as yaml_file: try: lowercase : str = yaml.load(_UpperCAmelCase , Loader=yaml.FullLoader ) lowercase : List[str] = flatten_yaml_as_dict(_UpperCAmelCase ) for k, v in flat_cfg.items(): setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) except yaml.YAMLError as exc: logger.error('Error while loading config file: {}. Error message: {}'.format(_UpperCAmelCase , str(_UpperCAmelCase ) ) ) return config def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: '''simple docstring''' lowercase : str = MobileViTVaConfig() lowercase : Union[str, Any] = False # dataset if task_name.startswith('imagenet1k_' ): lowercase : List[Any] = 10_00 if int(task_name.strip().split('_' )[-1] ) == 3_84: lowercase : int = 3_84 else: lowercase : Optional[int] = 2_56 lowercase : Tuple = 'imagenet-1k-id2label.json' elif task_name.startswith('imagenet21k_to_1k_' ): lowercase : Union[str, Any] = 2_10_00 if int(task_name.strip().split('_' )[-1] ) == 3_84: lowercase : List[Any] = 3_84 else: lowercase : Optional[Any] = 2_56 lowercase : List[Any] = 'imagenet-22k-id2label.json' elif task_name.startswith('ade20k_' ): lowercase : int = 1_51 lowercase : str = 5_12 lowercase : Tuple = 'ade20k-id2label.json' lowercase : Dict = True elif task_name.startswith('voc_' ): lowercase : Tuple = 21 lowercase : int = 5_12 lowercase : str = 'pascal-voc-id2label.json' lowercase : Union[str, Any] = True # orig_config lowercase : int = load_orig_config_file(_UpperCAmelCase ) assert getattr(_UpperCAmelCase , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model" lowercase : Union[str, Any] = getattr(_UpperCAmelCase , 'model.classification.mitv2.width_multiplier' , 1.0 ) assert ( getattr(_UpperCAmelCase , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" lowercase : Dict = getattr(_UpperCAmelCase , 'model.classification.activation.name' , 'swish' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: lowercase : Optional[int] = getattr(_UpperCAmelCase , 'model.segmentation.output_stride' , 16 ) if "_deeplabv3" in task_name: lowercase : int = getattr(_UpperCAmelCase , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] ) lowercase : Dict = getattr(_UpperCAmelCase , 'model.segmentation.deeplabv3.aspp_out_channels' , 5_12 ) lowercase : Any = getattr(_UpperCAmelCase , 
'model.segmentation.deeplabv3.aspp_dropout' , 0.1 ) # id2label lowercase : Any = 'huggingface/label-files' lowercase : List[Any] = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) , 'r' ) ) lowercase : Tuple = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} lowercase : Any = idalabel lowercase : List[str] = {v: k for k, v in idalabel.items()} return config def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase : Union[str, Any] = dct.pop(_UpperCAmelCase ) lowercase : Optional[int] = val def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase=False ) -> Dict: '''simple docstring''' if base_model: lowercase : str = '' else: lowercase : Union[str, Any] = 'mobilevitv2.' lowercase : Tuple = [] for k in state_dict.keys(): if k[:8] == "encoder.": lowercase : Optional[Any] = k[8:] else: lowercase : List[Any] = k if ".block." in k: lowercase : Any = k_new.replace('.block.' , '.' ) if ".conv." in k: lowercase : str = k_new.replace('.conv.' , '.convolution.' ) if ".norm." in k: lowercase : List[Any] = k_new.replace('.norm.' , '.normalization.' ) if "conv_1." in k: lowercase : Tuple = k_new.replace('conv_1.' , f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: lowercase : Any = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: lowercase : Dict = k_new.replace('.exp_1x1.' , '.expand_1x1.' ) if ".red_1x1." in k: lowercase : Tuple = k_new.replace('.red_1x1.' , '.reduce_1x1.' ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: lowercase : List[str] = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: lowercase : List[str] = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: lowercase : int = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: lowercase : int = [0, 1] elif i == 4: lowercase : Optional[int] = [0, 1, 2, 3] elif i == 5: lowercase : Dict = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: lowercase : Any = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: lowercase : Optional[int] = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: lowercase : str = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: lowercase : List[Any] = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' ) if "pre_norm_attn.1." in k: lowercase : Dict = k_new.replace('pre_norm_attn.1.' , 'attention.' ) if "pre_norm_ffn.0." in k: lowercase : Dict = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' ) if "pre_norm_ffn.1." in k: lowercase : Tuple = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' ) if "pre_norm_ffn.3." in k: lowercase : Any = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' ) if "classifier.1." in k: lowercase : Tuple = k_new.replace('classifier.1.' , 'classifier.' ) if "seg_head." in k: lowercase : List[Any] = k_new.replace('seg_head.' , 'segmentation_head.' ) if ".aspp_layer." in k: lowercase : str = k_new.replace('.aspp_layer.' , '.' ) if ".aspp_pool." 
in k: lowercase : Optional[Any] = k_new.replace('.aspp_pool.' , '.' ) rename_keys.append((k, k_new) ) return rename_keys def lowercase__ ( _UpperCAmelCase ) -> Optional[int]: '''simple docstring''' lowercase : Optional[int] = [] for k in state_dict.keys(): if k.startswith('seg_head.aux_head.' ): keys_to_ignore.append(_UpperCAmelCase ) for k in keys_to_ignore: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) def lowercase__ ( ) -> Union[str, Any]: '''simple docstring''' lowercase : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" lowercase : List[str] = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ) return im @torch.no_grad() def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any: '''simple docstring''' lowercase : Any = get_mobilevitva_config(_UpperCAmelCase , _UpperCAmelCase ) # load original state_dict lowercase : Dict = torch.load(_UpperCAmelCase , map_location='cpu' ) # load huggingface model if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ): lowercase : Union[str, Any] = MobileViTVaForSemanticSegmentation(_UpperCAmelCase ).eval() lowercase : str = False else: lowercase : List[Any] = MobileViTVaForImageClassification(_UpperCAmelCase ).eval() lowercase : Optional[Any] = False # remove and rename some keys of load the original model lowercase : int = checkpoint remove_unused_keys(_UpperCAmelCase ) lowercase : Optional[int] = create_rename_keys(_UpperCAmelCase , base_model=_UpperCAmelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # load modified state_dict model.load_state_dict(_UpperCAmelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor lowercase : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) lowercase : Union[str, Any] = image_processor(images=prepare_img() , return_tensors='pt' ) lowercase : int = model(**_UpperCAmelCase ) # verify classification model if task_name.startswith('imagenet' ): lowercase : List[Any] = outputs.logits lowercase : str = logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0: # expected_logits for base variant lowercase : int = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ) assert torch.allclose(logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_UpperCAmelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": _UpperCamelCase: Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--task', default='imagenet1k_256', type=str, help=( 'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
' '\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n ' ), choices=[ 'imagenet1k_256', 'imagenet1k_384', 'imagenet21k_to_1k_256', 'imagenet21k_to_1k_384', 'ade20k_deeplabv3', 'voc_deeplabv3', ], ) parser.add_argument( '--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).' ) parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.') parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) _UpperCamelCase: str = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
353
"""simple docstring""" import datasets from .evaluate import evaluate _UpperCamelCase: str = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n' _UpperCamelCase: int = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n' _UpperCamelCase: Optional[Any] = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def lowercase ( self : List[str] ) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { 'predictions': { 'id': datasets.Value('string' ), 'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ), }, 'references': { 'id': datasets.Value('string' ), 'answers': datasets.features.Sequence( { 'text': datasets.Value('string' ), 'answer_start': datasets.Value('int32' ), } ), }, } ), codebase_urls=['https://www.atticusprojectai.org/cuad'], reference_urls=['https://www.atticusprojectai.org/cuad'], ) def lowercase ( self : Any, lowerCAmelCase : int, lowerCAmelCase : Optional[Any] ) 
-> Optional[Any]: lowercase : int = {prediction['id']: prediction['prediction_text'] for prediction in predictions} lowercase : Any = [ { 'paragraphs': [ { 'qas': [ { 'answers': [{'text': answer_text} for answer_text in ref['answers']['text']], 'id': ref['id'], } for ref in references ] } ] } ] lowercase : int = evaluate(dataset=lowerCAmelCase, predictions=lowerCAmelCase ) return score
53
0
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _lowerCAmelCase ( a__ ): def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM snake_case : Tuple = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) @torch.no_grad() def __call__( self , UpperCamelCase__ = 1 , UpperCamelCase__ = None , UpperCamelCase__ = 0.0 , UpperCamelCase__ = 50 , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , ) -> str: '''simple docstring''' if isinstance(self.unet.config.sample_size , UpperCamelCase_ ): snake_case : Optional[int] = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: snake_case : List[str] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size: raise ValueError( F'You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch' F' size of {batch_size}. Make sure the batch size matches the length of the generators.' ) snake_case : Any = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output snake_case : Optional[int] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 snake_case : Optional[int] = self.scheduler.step( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , eta=UpperCamelCase_ , use_clipped_model_output=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample snake_case : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 ) snake_case : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": snake_case : List[str] = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
203
"""simple docstring""" def _snake_case ( _snake_case : list ): def merge(_snake_case : list , _snake_case : list ) -> list: def _merge(): while left and right: yield (left if left[0] <= right[0] else right).pop(0 ) yield from left yield from right return list(_merge() ) if len(_snake_case ) <= 1: return collection lowerCAmelCase : Union[str, Any] = len(_snake_case ) // 2 return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) ) if __name__ == "__main__": import doctest doctest.testmod() snake_case__ : Optional[Any] = input('''Enter numbers separated by a comma:\n''').strip() snake_case__ : Union[str, Any] = [int(item) for item in user_input.split(''',''')] print(*merge_sort(unsorted), sep=''',''')
60
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
359
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
151
0
import math
import unittest


def is_prime(number: int) -> bool:
    """Trial division using the fact that every prime > 3 has the form 6k +/- 1."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class TestIsPrime(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
59
'''simple docstring''' import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--user''', type=str, default='''ubuntu''') parser.add_argument('''--host''', type=str, default='''localhost''') parser.add_argument('''--key_path''', type=str, default=None) parser.add_argument('''--instance''', type=str, default='''V100:1''') parser.add_argument('''--provider''', type=str, default='''cheapest''') parser.add_argument('''--use_spot''', type=bool, default=False) parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''') UpperCamelCase__ , UpperCamelCase__ = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError('''Cannot specify both BYO and on-demand cluster args''') UpperCamelCase__ = rh.cluster( name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path} ) else: UpperCamelCase__ = rh.cluster( name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) UpperCamelCase__ = args.example.rsplit('''/''', 1)[0] # Set up remote environment cluster.install_packages(['''pip:./''']) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([F"""pip install -r transformers/examples/{example_dir}/requirements.txt"""]) cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117''']) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([F"""python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"""]) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
181
0
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : List[str] = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''openai-gpt''' UpperCAmelCase__ = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : str , UpperCAmelCase__ : Any=40_478 , UpperCAmelCase__ : str=512 , UpperCAmelCase__ : Optional[int]=768 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : str=12 , UpperCAmelCase__ : List[Any]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[str]=1e-5 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Optional[int]="cls_index" , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : List[str]=0.1 , **UpperCAmelCase__ : int , ) ->Dict: '''simple docstring''' A__ = vocab_size A__ = n_positions A__ = n_embd A__ = n_layer A__ = n_head A__ = afn A__ = resid_pdrop A__ = embd_pdrop A__ = attn_pdrop A__ = layer_norm_epsilon A__ = initializer_range A__ = summary_type A__ = summary_use_proj A__ = summary_activation A__ = summary_first_dropout A__ = summary_proj_to_labels super().__init__(**UpperCAmelCase__)
231
from __future__ import annotations import queue class UpperCamelCase_ : '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase__ : Dict) ->Any: '''simple docstring''' A__ = data A__ = None A__ = None def SCREAMING_SNAKE_CASE ( ) -> TreeNode: """simple docstring""" print('''\n********Press N to stop entering at any point of time********\n''' ) A__ = input('''Enter the value of the root node: ''' ).strip().lower() A__ = queue.Queue() A__ = TreeNode(int(lowercase_ ) ) q.put(lowercase_ ) while not q.empty(): A__ = q.get() A__ = f"""Enter the left node of {node_found.data}: """ A__ = input(lowercase_ ).strip().lower() or '''n''' if check == "n": return tree_node A__ = TreeNode(int(lowercase_ ) ) A__ = left_node q.put(lowercase_ ) A__ = f"""Enter the right node of {node_found.data}: """ A__ = input(lowercase_ ).strip().lower() or '''n''' if check == "n": return tree_node A__ = TreeNode(int(lowercase_ ) ) A__ = right_node q.put(lowercase_ ) raise def SCREAMING_SNAKE_CASE ( lowercase_ ) -> None: """simple docstring""" if not isinstance(lowercase_ , lowercase_ ) or not node: return print(node.data , end=''',''' ) pre_order(node.left ) pre_order(node.right ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> None: """simple docstring""" if not isinstance(lowercase_ , lowercase_ ) or not node: return in_order(node.left ) print(node.data , end=''',''' ) in_order(node.right ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> None: """simple docstring""" if not isinstance(lowercase_ , lowercase_ ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=''',''' ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> None: """simple docstring""" if not isinstance(lowercase_ , lowercase_ ) or not node: return A__ = queue.Queue() q.put(lowercase_ ) while not q.empty(): A__ = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> None: """simple docstring""" if not isinstance(lowercase_ , lowercase_ ) or not node: return A__ = queue.Queue() q.put(lowercase_ ) while not q.empty(): A__ = [] while not q.empty(): A__ = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> None: """simple docstring""" if not isinstance(lowercase_ , lowercase_ ) or not node: return A__ = [] A__ = node while n or stack: while n: # start from root node, find its left child print(n.data , end=''',''' ) stack.append(lowercase_ ) A__ = n.left # end of while means current node doesn't have left child A__ = stack.pop() # start to traverse its right child A__ = n.right def SCREAMING_SNAKE_CASE ( lowercase_ ) -> None: """simple docstring""" if not isinstance(lowercase_ , lowercase_ ) or not node: return A__ = [] A__ = node while n or stack: while n: stack.append(lowercase_ ) A__ = n.left A__ = stack.pop() print(n.data , end=''',''' ) A__ = n.right def SCREAMING_SNAKE_CASE ( lowercase_ ) -> None: """simple docstring""" if not isinstance(lowercase_ , lowercase_ ) or not node: return A__ , A__ = [], [] A__ = node stacka.append(lowercase_ ) while stacka: # to find the reversed order of post order, store it in stack2 A__ = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(lowercase_ ) while stacka: # pop up from stack2 will be the post 
order print(stacka.pop().data , end=''',''' ) def SCREAMING_SNAKE_CASE ( lowercase_ = "" , lowercase_=50 , lowercase_="*" ) -> str: """simple docstring""" if not s: return "\n" + width * char A__ , A__ = divmod(width - len(lowercase_ ) - 2 , 2 ) return f"""{left * char} {s} {(left + extra) * char}""" if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) _lowerCamelCase : TreeNode = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
231
1
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowercase__ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt') lowercase__ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowercase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __snake_case : a__ = field( default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} ) a__ = field( default=__lowerCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) a__ = field( default=__lowerCAmelCase , metadata={"""help""": """The column name of the images in the files. If not set, will try to use 'image' or 'img'."""} , ) a__ = field(default=__lowerCAmelCase , metadata={"""help""": """A folder containing the training data."""} ) a__ = field(default=__lowerCAmelCase , metadata={"""help""": """A folder containing the validation data."""} ) a__ = field( default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} ) a__ = field(default=32 , metadata={"""help""": """The size of the square patches to use for masking."""} ) a__ = field( default=0.6 , metadata={"""help""": """Percentage of patches to mask."""} , ) a__ = field( default=__lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) a__ = field( default=__lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def lowerCamelCase_ ( self) -> Union[str, Any]: '''simple docstring''' a__: Optional[Any] = {} if self.train_dir is not None: a__: List[str] = self.train_dir if self.validation_dir is not None: a__: Dict = self.validation_dir a__: List[Any] = data_files if data_files else None @dataclass class __snake_case : a__ = field( default=__lowerCAmelCase , metadata={ """help""": ( """The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a """ """checkpoint identifier on the hub. """ """Don't set if you want to train a model from scratch.""" ) } , ) a__ = field( default=__lowerCAmelCase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(__lowerCAmelCase )} , ) a__ = field( default=__lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a__ = field( default=__lowerCAmelCase , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. 
Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) a__ = field( default=__lowerCAmelCase , metadata={"""help""": """Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"""} , ) a__ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) a__ = field(default=__lowerCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""} ) a__ = field( default=__lowerCAmelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) a__ = field( default=__lowerCAmelCase , metadata={ """help""": ( """The size (resolution) of each image. If not specified, will use `image_size` of the configuration.""" ) } , ) a__ = field( default=__lowerCAmelCase , metadata={ """help""": ( """The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.""" ) } , ) a__ = field( default=__lowerCAmelCase , metadata={"""help""": """Stride to use for the encoder."""} , ) class __snake_case : def __init__( self , lowercase=1_92 , lowercase=32 , lowercase=4 , lowercase=0.6) -> Optional[Any]: '''simple docstring''' a__: int = input_size a__: int = mask_patch_size a__: Dict = model_patch_size a__: int = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('Input size must be divisible by mask patch size') if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('Mask patch size must be divisible by model patch size') a__: Tuple = self.input_size // self.mask_patch_size a__: List[str] = self.mask_patch_size // self.model_patch_size a__: Dict = self.rand_size**2 a__: Any = int(np.ceil(self.token_count * self.mask_ratio)) def __call__( self) -> List[Any]: '''simple docstring''' a__: int = np.random.permutation(self.token_count)[: self.mask_count] a__: List[str] = np.zeros(self.token_count , dtype=lowercase) a__: Optional[int] = 1 a__: Optional[int] = mask.reshape((self.rand_size, self.rand_size)) a__: List[str] = mask.repeat(self.scale , axis=0).repeat(self.scale , axis=1) return torch.tensor(mask.flatten()) def __a ( _SCREAMING_SNAKE_CASE ) ->Dict: a__: str = torch.stack([example['pixel_values'] for example in examples] ) a__: str = torch.stack([example['mask'] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def __a ( ) ->List[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. a__: Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. a__ , a__ , a__: Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: a__ , a__ , a__: List[Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry('run_mim' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() a__: int = training_args.get_process_log_level() logger.setLevel(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. a__: Optional[int] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: a__: Any = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. a__: int = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. a__: Any = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0: a__: str = ds['train'].train_test_split(data_args.train_val_split ) a__: Any = split['train'] a__: Dict = split['test'] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. a__: List[Any] = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: a__: Any = AutoConfig.from_pretrained(model_args.config_name_or_path , **_SCREAMING_SNAKE_CASE ) elif model_args.model_name_or_path: a__: str = AutoConfig.from_pretrained(model_args.model_name_or_path , **_SCREAMING_SNAKE_CASE ) else: a__: int = CONFIG_MAPPING[model_args.model_type]() logger.warning('You are instantiating a new config instance from scratch.' 
) if model_args.config_overrides is not None: logger.info(F'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(F'New config: {config}' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(_SCREAMING_SNAKE_CASE , 'decoder_type' ): a__: Union[str, Any] = 'simmim' # adapt config a__: Optional[int] = model_args.image_size if model_args.image_size is not None else config.image_size a__: Optional[Any] = model_args.patch_size if model_args.patch_size is not None else config.patch_size a__: List[str] = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { 'image_size': model_args.image_size, 'patch_size': model_args.patch_size, 'encoder_stride': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: a__: Optional[int] = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **_SCREAMING_SNAKE_CASE ) elif model_args.model_name_or_path: a__: Optional[int] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **_SCREAMING_SNAKE_CASE ) else: a__: Optional[Any] = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } a__: Any = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: a__: Tuple = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) a__: Optional[Any] = AutoModelForMaskedImageModeling.from_config(_SCREAMING_SNAKE_CASE ) if training_args.do_train: a__: Any = ds['train'].column_names else: a__: List[Any] = ds['validation'].column_names if data_args.image_column_name is not None: a__: Optional[Any] = data_args.image_column_name elif "image" in column_names: a__: str = 'image' elif "img" in column_names: a__: List[str] = 'img' else: a__: Tuple = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py a__: Union[str, Any] = Compose( [ Lambda(lambda _SCREAMING_SNAKE_CASE : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator a__: int = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(_SCREAMING_SNAKE_CASE ): a__: str = [transforms(_SCREAMING_SNAKE_CASE ) for image in examples[image_column_name]] a__: Optional[int] = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: a__: Tuple = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_SCREAMING_SNAKE_CASE ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a 
validation dataset' ) if data_args.max_eval_samples is not None: a__: Tuple = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_SCREAMING_SNAKE_CASE ) # Initialize our trainer a__: List[str] = Trainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: a__: Optional[int] = None if training_args.resume_from_checkpoint is not None: a__: List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: a__: List[str] = last_checkpoint a__: Optional[int] = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: a__: Optional[Any] = trainer.evaluate() trainer.log_metrics('eval' , _SCREAMING_SNAKE_CASE ) trainer.save_metrics('eval' , _SCREAMING_SNAKE_CASE ) # Write model card and (optionally) push to hub a__: Tuple = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'masked-image-modeling', 'dataset': data_args.dataset_name, 'tags': ['masked-image-modeling'], } if training_args.push_to_hub: trainer.push_to_hub(**_SCREAMING_SNAKE_CASE ) else: trainer.create_model_card(**_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
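# A self-contained sketch of the SimMIM masking step used by the script above, with
# explicit hypothetical names (make_mask, batch) so the shapes can be checked in
# isolation; assumes integer masks and the SimMIM defaults (192x192 inputs,
# 32-pixel mask patches, 4-pixel model patches, 60% mask ratio).
import numpy as np
import torch


def make_mask(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6) -> torch.Tensor:
    rand_size = input_size // mask_patch_size        # 6 mask patches per side
    scale = mask_patch_size // model_patch_size      # each covers 8x8 model patches
    token_count = rand_size**2                       # 36 maskable positions
    mask_count = int(np.ceil(token_count * mask_ratio))

    mask = np.zeros(token_count, dtype=int)
    mask[np.random.permutation(token_count)[:mask_count]] = 1
    mask = mask.reshape(rand_size, rand_size).repeat(scale, axis=0).repeat(scale, axis=1)
    return torch.tensor(mask.flatten())              # 48 * 48 = 2304 entries


examples = [{"pixel_values": torch.randn(3, 192, 192), "mask": make_mask()} for _ in range(2)]
batch = {
    "pixel_values": torch.stack([e["pixel_values"] for e in examples]),
    "bool_masked_pos": torch.stack([e["mask"] for e in examples]),
}
assert batch["pixel_values"].shape == (2, 3, 192, 192)
assert batch["bool_masked_pos"].shape == (2, 2304)
print(batch["bool_masked_pos"].float().mean())       # ~0.61 of model patches are masked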
290
"""simple docstring""" import os from typing import Dict, List, Tuple, TypeVar, Union lowercase__ = TypeVar('T') lowercase__ = Union[List[T], Tuple[T, ...]] lowercase__ = Union[T, List[T], Dict[str, T]] lowercase__ = Union[str, bytes, os.PathLike]
290
1
'''simple docstring'''
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Convert the number to a string to iterate over its digits and sum their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or
        # the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If the chain contains exactly the desired number of elements, increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
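# A small check of the chain logic, using the classic example from the problem
# statement: starting at 69 the chain 69 -> 363600 -> 1454 -> 169 -> 363601
# visits five distinct numbers before 1454 repeats.
chain = []
element = 69
while element not in chain:
    chain.append(element)
    element = digit_factorial_sum(element)
print(chain)       # [69, 363600, 1454, 169, 363601]
print(len(chain))  # 5
assert digit_factorial_sum(145) == 145  # 1! + 4! + 5! = 145 is a fixed point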
369
'''simple docstring''' from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class UpperCAmelCase_ : """simple docstring""" def lowerCamelCase ( self : Optional[Any] , snake_case_ : Optional[int] ): raise NotImplementedError() def lowerCamelCase ( self : Optional[int] ): raise NotImplementedError() class UpperCAmelCase_ ( _a ): """simple docstring""" def __init__( self : Tuple , snake_case_ : "AutoTokenizer" , snake_case_ : bool = False , **snake_case_ : Tuple ): snake_case__ : Tuple = tokenizer snake_case__ : List[str] = skip_prompt snake_case__ : Optional[int] = decode_kwargs # variables used in the streaming process snake_case__ : Optional[int] = [] snake_case__ : Optional[int] = 0 snake_case__ : List[Any] = True def lowerCamelCase ( self : List[str] , snake_case_ : int ): if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError("""TextStreamer only supports batch size 1""" ) elif len(value.shape ) > 1: snake_case__ : Optional[Any] = value[0] if self.skip_prompt and self.next_tokens_are_prompt: snake_case__ : List[Any] = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) snake_case__ : Tuple = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith("""\n""" ): snake_case__ : int = text[self.print_len :] snake_case__ : Optional[int] = [] snake_case__ : int = 0 # If the last token is a CJK character, we print the characters. elif len(snake_case_ ) > 0 and self._is_chinese_char(ord(text[-1] ) ): snake_case__ : str = text[self.print_len :] self.print_len += len(snake_case_ ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) else: snake_case__ : Dict = text[self.print_len : text.rfind(""" """ ) + 1] self.print_len += len(snake_case_ ) self.on_finalized_text(snake_case_ ) def lowerCamelCase ( self : int ): # Flush the cache, if it exists if len(self.token_cache ) > 0: snake_case__ : Union[str, Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) snake_case__ : Optional[Any] = text[self.print_len :] snake_case__ : Tuple = [] snake_case__ : int = 0 else: snake_case__ : int = """""" snake_case__ : Union[str, Any] = True self.on_finalized_text(snake_case_ , stream_end=snake_case_ ) def lowerCamelCase ( self : Optional[int] , snake_case_ : str , snake_case_ : bool = False ): print(snake_case_ , flush=snake_case_ , end="""""" if not stream_end else None ) def lowerCamelCase ( self : int , snake_case_ : Optional[int] ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False class UpperCAmelCase_ ( _a ): """simple docstring""" def __init__( self : Optional[int] , snake_case_ : "AutoTokenizer" , snake_case_ : bool = False , snake_case_ : Optional[float] = None , **snake_case_ : List[Any] ): super().__init__(snake_case_ , snake_case_ , **snake_case_ ) snake_case__ : Dict = Queue() snake_case__ : List[Any] = None snake_case__ : int = timeout def lowerCamelCase ( self : Dict , snake_case_ : str , snake_case_ : bool = False ): self.text_queue.put(snake_case_ , timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal , timeout=self.timeout ) def __iter__( self : List[str] ): return self def lowerCamelCase ( self : str ): snake_case__ : List[Any] = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
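# A typical driver for the queue-backed iterator streamer defined above, assuming
# the standard transformers generate() API and a small causal LM such as "gpt2":
# generation runs in a background thread while the main thread consumes decoded
# text chunks as they are put on the queue.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("The queue-based streamer", return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, timeout=30.0)
thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20})
thread.start()
for text_chunk in streamer:  # blocks until the worker thread calls put()
    print(text_chunk, end="", flush=True)
thread.join()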
43
0
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class __UpperCamelCase : def __init__( self, lowerCAmelCase=2, lowerCAmelCase=3, lowerCAmelCase=64, lowerCAmelCase=None ): """simple docstring""" lowerCamelCase_ =np.random.default_rng(lowerCAmelCase ) lowerCamelCase_ =length lowerCamelCase_ =rng.normal(size=(length,) ).astype(np.floataa ) lowerCamelCase_ =a * self.x + b + rng.normal(scale=0.1, size=(length,) ).astype(np.floataa ) def __len__( self ): """simple docstring""" return self.length def __getitem__( self, lowerCAmelCase ): """simple docstring""" return {"x": self.x[i], "y": self.y[i]} class __UpperCamelCase ( torch.nn.Module ): def __init__( self, lowerCAmelCase=0, lowerCAmelCase=0, lowerCAmelCase=False ): """simple docstring""" super().__init__() lowerCamelCase_ =torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCamelCase_ =torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCamelCase_ =True def lowercase__ ( self, lowerCAmelCase=None ): """simple docstring""" if self.first_batch: print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' ) lowerCamelCase_ =False return x * self.a[0] + self.b[0] class __UpperCamelCase ( torch.nn.Module ): def __init__( self, lowerCAmelCase=0, lowerCAmelCase=0, lowerCAmelCase=False ): """simple docstring""" super().__init__() lowerCamelCase_ =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() ) lowerCamelCase_ =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() ) lowerCamelCase_ =True def lowercase__ ( self, lowerCAmelCase=None ): """simple docstring""" if self.first_batch: print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' ) lowerCamelCase_ =False return x * self.a + self.b def a_ ( __snake_case : List[str] , __snake_case : int = 16 ) -> str: """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowerCamelCase_ ={'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} lowerCamelCase_ =load_dataset('''csv''' , data_files=__snake_case ) lowerCamelCase_ =datasets['''train'''].unique('''label''' ) lowerCamelCase_ ={v: i for i, v in enumerate(__snake_case )} def tokenize_function(__snake_case : Any ): # max_length=None => use the model max length (it's actually the default) lowerCamelCase_ =tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case , padding='''max_length''' ) if "label" in examples: lowerCamelCase_ =[label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCamelCase_ =datasets.map( __snake_case , batched=__snake_case , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(__snake_case : int ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__snake_case , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(__snake_case , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. 
lowerCamelCase_ =DataLoader(tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=2 ) lowerCamelCase_ =DataLoader(tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=1 ) return train_dataloader, eval_dataloader
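# A minimal smoke test for the regression helpers above (a standalone sketch, no
# accelerator involved), assuming the classes keep their upstream accelerate names
# and signatures: RegressionDataset(a, b, length, seed) and RegressionModel(a, b).
import torch
from torch.utils.data import DataLoader

dataset = RegressionDataset(a=2, b=3, length=64)
loader = DataLoader(dataset, batch_size=16, shuffle=True)
model = RegressionModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for _ in range(10):
    for minibatch in loader:
        loss = ((model(minibatch["x"]) - minibatch["y"]) ** 2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
print(model.a.item(), model.b.item())  # should drift toward a=2, b=3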
75
'''simple docstring''' import copy import os import cva import numpy as np from matplotlib import pyplot as plt class __UpperCamelCase : def __init__( self ): """simple docstring""" lowerCamelCase_ ='''''' lowerCamelCase_ ='''''' lowerCamelCase_ =[] lowerCamelCase_ =0 lowerCamelCase_ =256 lowerCamelCase_ =0 lowerCamelCase_ =0 lowerCamelCase_ =0 lowerCamelCase_ =0 def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =cva.imread(lowerCAmelCase, 0 ) lowerCamelCase_ =copy.deepcopy(self.img ) lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =plt.hist(self.img.ravel(), 256, [0, 256], label='''x''' ) lowerCamelCase_ =np.sum(lowerCAmelCase ) for i in range(len(lowerCAmelCase ) ): lowerCamelCase_ =x[i] / self.k self.sk += prk lowerCamelCase_ =(self.L - 1) * self.sk if self.rem != 0: lowerCamelCase_ =int(last % last ) lowerCamelCase_ =int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(lowerCAmelCase ) lowerCamelCase_ =int(np.ma.count(self.img ) / self.img[1].size ) lowerCamelCase_ =self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCamelCase_ =self.img[j][i] if num != self.last_list[num]: lowerCamelCase_ =self.last_list[num] cva.imwrite('''output_data/output.jpg''', self.img ) def lowercase__ ( self ): """simple docstring""" plt.hist(self.img.ravel(), 256, [0, 256] ) def lowercase__ ( self ): """simple docstring""" cva.imshow('''Output-Image''', self.img ) cva.imshow('''Input-Image''', self.original_image ) cva.waitKey(5_000 ) cva.destroyAllWindows() if __name__ == "__main__": a_ : str = os.path.join(os.path.basename(__file__), """image_data/input.jpg""") a_ : Optional[Any] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
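# The mapping the stretcher above appears to implement is plain histogram
# equalization: each input level r_k goes to s_k = (L - 1) * sum_{j<=k} p(r_j),
# the scaled cumulative histogram. The same transform as a NumPy lookup table,
# on a hypothetical toy image:
import numpy as np

L = 256
img = np.random.randint(0, 64, size=(32, 32))    # dark, low-contrast image
hist = np.bincount(img.ravel(), minlength=L)
cdf = np.cumsum(hist / img.size)                 # running sum of p(r_k)
lut = np.rint((L - 1) * cdf).astype(np.uint8)    # s_k for every level k
equalized = lut[img]                             # apply the lookup table
print(img.max(), equalized.max())                # contrast is stretched to 255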
75
1
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
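# Quick sanity checks for the 6k±1 primality test and the running sum; these are
# small enough to run instantly, whereas the default solution() sums all primes
# below two million (Project Euler problem 10).
assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
assert solution(10) == 17   # 2 + 3 + 5 + 7
assert solution(50) == 328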
22
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
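# What the _import_structure indirection buys, in miniature (a simplified
# stand-in, not the real transformers._LazyModule): attribute access triggers the
# owning module's import, so nothing heavy is loaded until a symbol is touched.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol back to the module that defines it.
        self._reverse = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, symbol):
        # Import the owning module only on first attribute access.
        module = importlib.import_module(self._reverse[symbol])
        return getattr(module, symbol)


lazy = LazyModule("demo", {"math": ["sqrt"], "json": ["JSONDecoder"]})
print(lazy.sqrt(2.0))  # math is imported here, not when LazyModule is constructed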
22
1
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class _A : """simple docstring""" UpperCAmelCase : int UpperCAmelCase : int class _A : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : int): a : list[list[Edge]] = [[] for _ in range(__UpperCAmelCase)] a : Union[str, Any] = size def __getitem__( self : Tuple , __UpperCAmelCase : int): return iter(self._graph[vertex]) @property def __snake_case ( self : Optional[int]): return self._size def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int): if weight not in (0, 1): raise ValueError("Edge weight must be either 0 or 1.") if to_vertex < 0 or to_vertex >= self.size: raise ValueError("Vertex indexes must be in [0; size).") self._graph[from_vertex].append(Edge(__UpperCAmelCase , __UpperCAmelCase)) def __snake_case ( self : Any , __UpperCAmelCase : int , __UpperCAmelCase : int): a : List[str] = deque([start_vertex]) a : list[int | None] = [None] * self.size a : List[Any] = 0 while queue: a : int = queue.popleft() a : Union[str, Any] = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: a : Dict = current_distance + edge.weight a : Optional[Any] = distances[edge.destination_vertex] if ( isinstance(__UpperCAmelCase , __UpperCAmelCase) and new_distance >= dest_vertex_distance ): continue a : List[Any] = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex) else: queue.append(edge.destination_vertex) if distances[finish_vertex] is None: raise ValueError("No path from start_vertex to finish_vertex.") return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
40
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer __A = logging.get_logger(__name__) __A = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} __A = { '''vocab_file''': { '''google/realm-cc-news-pretrained-embedder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-encoder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-scorer''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-openqa''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt''' ), '''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''', '''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''', '''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''', '''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''', }, '''tokenizer_file''': { '''google/realm-cc-news-pretrained-embedder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont''' ), '''google/realm-cc-news-pretrained-encoder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json''' ), '''google/realm-cc-news-pretrained-scorer''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json''' ), '''google/realm-cc-news-pretrained-openqa''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json''' ), '''google/realm-orqa-nq-openqa''': ( '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json''' ), '''google/realm-orqa-nq-reader''': ( '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json''' ), '''google/realm-orqa-wq-openqa''': ( '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json''' ), '''google/realm-orqa-wq-reader''': ( '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json''' ), }, } __A = { '''google/realm-cc-news-pretrained-embedder''': 512, '''google/realm-cc-news-pretrained-encoder''': 512, '''google/realm-cc-news-pretrained-scorer''': 512, '''google/realm-cc-news-pretrained-openqa''': 512, '''google/realm-orqa-nq-openqa''': 512, '''google/realm-orqa-nq-reader''': 512, '''google/realm-orqa-wq-openqa''': 512, '''google/realm-orqa-wq-reader''': 512, } __A = { '''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-nq-reader''': {'''do_lower_case''': True}, '''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-wq-reader''': 
{'''do_lower_case''': True}, } class _snake_case ( a__ ): snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = PRETRAINED_INIT_CONFIGURATION snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = RealmTokenizer def __init__( self : Optional[int] , UpperCAmelCase : Any=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : int=True , UpperCAmelCase : List[Any]="[UNK]" , UpperCAmelCase : Tuple="[SEP]" , UpperCAmelCase : List[str]="[PAD]" , UpperCAmelCase : Tuple="[CLS]" , UpperCAmelCase : List[Any]="[MASK]" , UpperCAmelCase : str=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : Any , ): super().__init__( UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , ) __lowerCamelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars ): __lowerCamelCase : str = getattr(UpperCAmelCase , normalizer_state.pop("type" ) ) __lowerCamelCase : Any = do_lower_case __lowerCamelCase : List[Any] = strip_accents __lowerCamelCase : Optional[Any] = tokenize_chinese_chars __lowerCamelCase : int = normalizer_class(**UpperCAmelCase ) __lowerCamelCase : List[Any] = do_lower_case def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : Dict , **UpperCAmelCase : int ): __lowerCamelCase : Optional[int] = PaddingStrategy.MAX_LENGTH __lowerCamelCase : List[Any] = text __lowerCamelCase : Optional[int] = kwargs.pop("text_pair" , UpperCAmelCase ) __lowerCamelCase : List[Any] = kwargs.pop("return_tensors" , UpperCAmelCase ) __lowerCamelCase : Dict = { "input_ids": [], "attention_mask": [], "token_type_ids": [], } for idx, candidate_text in enumerate(UpperCAmelCase ): if batch_text_pair is not None: __lowerCamelCase : List[str] = batch_text_pair[idx] else: __lowerCamelCase : Optional[int] = None __lowerCamelCase : List[str] = super().__call__(UpperCAmelCase , UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) __lowerCamelCase : Union[str, Any] = encoded_candidates.get("input_ids" ) __lowerCamelCase : Optional[int] = encoded_candidates.get("attention_mask" ) __lowerCamelCase : int = encoded_candidates.get("token_type_ids" ) if encoded_input_ids is not None: output_data["input_ids"].append(UpperCAmelCase ) if encoded_attention_mask is not None: output_data["attention_mask"].append(UpperCAmelCase ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(UpperCAmelCase ) __lowerCamelCase : Union[str, Any] = {key: item for key, item in output_data.items() if len(UpperCAmelCase ) != 0} return BatchEncoding(UpperCAmelCase , tensor_type=UpperCAmelCase ) def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=None ): __lowerCamelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ): __lowerCamelCase : Tuple = [self.sep_token_id] __lowerCamelCase : 
List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : int , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ): __lowerCamelCase : Any = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase ) return tuple(UpperCAmelCase )
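# How the candidate-batching __call__ above is meant to be driven (assuming the
# published google/realm-cc-news-pretrained-encoder checkpoint is available):
# each example carries several candidate texts, and the output is padded to
# max_length with shape (batch_size, num_candidates, max_length).
from transformers import RealmTokenizerFast

tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
batch_text = [["candidate one", "candidate two"], ["another one", "another two"]]
encoded = tokenizer.batch_encode_candidates(batch_text, max_length=10, return_tensors="pt")
print(encoded["input_ids"].shape)  # torch.Size([2, 2, 10])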
135
0
"""simple docstring""" import os import re import shutil import sys import tempfile import unittest import black snake_case_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. snake_case_ = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n' class A_ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self :Union[str, Any] ) -> List[str]: UpperCAmelCase = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir , 'models/bert/' ) ) UpperCAmelCase = self.transformer_dir shutil.copy( os.path.join(__lowerCamelCase , 'src/transformers/models/bert/modeling_bert.py' ) , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py' ) , ) def UpperCAmelCase__ ( self :int ) -> Optional[Any]: UpperCAmelCase = '''src/transformers''' shutil.rmtree(self.transformer_dir ) def UpperCAmelCase__ ( self :Tuple , lowercase_ :int , lowercase_ :Optional[int] , lowercase_ :Any , lowercase_ :Union[str, Any]=None ) -> Tuple: UpperCAmelCase = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: UpperCAmelCase = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result UpperCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 ) UpperCAmelCase = black.format_str(__lowerCamelCase , mode=__lowerCamelCase ) UpperCAmelCase = os.path.join(self.transformer_dir , 'new_code.py' ) with open(__lowerCamelCase , 'w' , newline='\n' ) as f: f.write(__lowerCamelCase ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(__lowerCamelCase ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=__lowerCamelCase ) with open(__lowerCamelCase , 'r' ) as f: self.assertTrue(f.read() , __lowerCamelCase ) def UpperCAmelCase__ ( self :Tuple ) -> str: UpperCAmelCase = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def UpperCAmelCase__ ( self :Optional[Any] ) -> Optional[Any]: self.check_copy_consistency( '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , ) # With no empty line at the end self.check_copy_consistency( '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , __lowerCamelCase , ) # Copy consistency with rename self.check_copy_consistency( '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , __lowerCamelCase ) , ) # Copy consistency with a really long name UpperCAmelCase = 
'''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( f"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , f"""{long_class_name}LMPredictionHead""" , re.sub('Bert' , __lowerCamelCase , __lowerCamelCase ) , ) # Copy consistency with overwrite self.check_copy_consistency( '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , __lowerCamelCase , overwrite_result=re.sub('Bert' , 'TestModel' , __lowerCamelCase ) , ) def UpperCAmelCase__ ( self :Dict ) -> int: UpperCAmelCase = check_copies.LOCALIZED_READMES['''README_zh-hans.md'''] UpperCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the''' ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for''' ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong''' ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.''' ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),''' ''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and''' ''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same''' ''' method has been applied to compress GPT2 into''' ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into''' ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),''' ''' Multilingual BERT into''' ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German''' ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**''' ''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders''' ''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang''' ''' Luong, Quoc V. Le, Christopher D. Manning.''' ) UpperCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) UpperCAmelCase = ( '''1. 
**[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.''' ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文''' ''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and''' ''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same''' ''' method has been applied to compress GPT2 into''' ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into''' ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),''' ''' Multilingual BERT into''' ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German''' ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自''' ''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather''' ''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,''' ''' Christopher D. Manning 发布。\n''' ) UpperCAmelCase = check_copies.convert_to_localized_md( __lowerCamelCase , __lowerCamelCase , localized_readme['format_model_list'] ) self.assertFalse(__lowerCamelCase ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) UpperCAmelCase = check_copies.convert_to_localized_md( __lowerCamelCase , __lowerCamelCase , localized_readme['format_model_list'] ) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(__lowerCamelCase ) UpperCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the''' ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for''' ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong''' ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.''' ) UpperCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and''' ''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) UpperCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) UpperCAmelCase = check_copies.convert_to_localized_md( __lowerCamelCase , __lowerCamelCase , localized_readme['format_model_list'] ) # Check if the model link is synchronized. self.assertEqual(__lowerCamelCase , __lowerCamelCase )
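# The mechanism under test, in miniature (a hypothetical helper, not the real
# check_copies module): take the reference code, apply the "Bert->TestModel"
# rename from the "# Copied from" marker, and require the copy to match exactly.
import re


def apply_rename(reference: str, pattern: str) -> str:
    old, new = pattern.split("->")
    return re.sub(old, new, reference)


reference = "class BertHead:\n    name = 'Bert'\n"
copy = "class TestModelHead:\n    name = 'TestModel'\n"
assert apply_rename(reference, "Bert->TestModel") == copy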
356
"""simple docstring""" # Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available snake_case_ = { """configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""], """tokenization_cpmant""": ["""CpmAntTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ """CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""", """CpmAntForCausalLM""", """CpmAntModel""", """CpmAntPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
181
0