Dataset schema (one row per sample; each row's fields appear in the column order below):

    column                   type     range
    code                     string   length 81 - 54k
    code_codestyle           int64    0 - 721
    style_context            string   length 91 - 41.9k
    style_context_codestyle  int64    0 - 699
    label                    int64    0 - 1
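For orientation, here is a minimal sketch of how a dataset with this schema could be loaded and inspected with the `datasets` library; the repository name "owner/code-style-pairs" is a placeholder for the actual (unnamed) dataset repository:

from datasets import load_dataset

# "owner/code-style-pairs" is hypothetical -- substitute the real dataset repository.
ds = load_dataset("owner/code-style-pairs", split="train")
print(ds.features)       # code, code_codestyle, style_context, style_context_codestyle, label
print(ds[0]["label"])    # binary label, 0 or 1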
code:

# transformers -- timesformer lazy-import package definition (models/timesformer/__init__.py)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Only expose the PyTorch modeling classes when torch is installed.
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so the heavy imports happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 711
style_context:

# datasets -- formatter registry behind `Dataset.set_format` (formatting/__init__.py)
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter object under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an error to raise when a format type's backend is not installed."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve an alias (e.g. "np") to its canonical format type (e.g. "numpy")."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate the Formatter registered under `format_type`, or raise a helpful error."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
style_context_codestyle: 39
label: 0
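The `style_context` module in this row registers the formatters reached through `Dataset.set_format`; a minimal usage sketch with toy data (not taken from the source):

from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]})
ds.set_format("numpy")     # resolved via _FORMAT_TYPES; the alias "np" works too
print(type(ds[0]["x"]))    # numpy.ndarray instead of a plain Python list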
code:

# diffusers -- scheduler tests for CMStochasticIterativeScheduler (consistency models)
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_with_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
code_codestyle: 712
style_context:

# transformers -- convert original FocalNet checkpoints to the HF format
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms

from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config


def rename_key(name):
    # Map original checkpoint parameter names to the HF module layout.
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
style_context_codestyle: 39
label: 0
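Given the argparse block at the bottom of the conversion script above, a direct call would look like the sketch below; the output folder name is illustrative, and it assumes the function is importable from the script module:

# Equivalent of running the script with:
#   --model_name focalnet-tiny --pytorch_dump_folder_path ./focalnet-tiny-converted
convert_focalnet_checkpoint("focalnet-tiny", "./focalnet-tiny-converted", push_to_hub=False)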
code:

# transformers -- SEW lazy-import package definition (models/sew/__init__.py)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 713
style_context:

# transformers -- fast tokenizer for SqueezeBERT (tokenization_squeezebert_fast.py)
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-create the backend normalizer if its saved state disagrees with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
style_context_codestyle: 39
label: 0
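A minimal sketch of the fast tokenizer above in use, showing the token type ids produced by `create_token_type_ids_from_sequences` for a sentence pair (the checkpoint name is taken from the pretrained maps in the file):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-uncased")
encoded = tokenizer("first segment", "second segment")
# 0s cover [CLS] + first segment + [SEP]; 1s cover second segment + [SEP]
print(encoded["token_type_ids"])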
code:

# transformers -- Flax GPT-J model tests (tests/models/gptj/test_modeling_flax_gptj.py)
import tempfile
import unittest

import numpy as np

import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel

if is_torch_available():
    import torch


class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        # Verify that step-by-step decoding with a KV cache matches a full forward pass.
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")

                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
code_codestyle: 714
style_context:

# Maths -- arithmetic mean of a list of numbers
from __future__ import annotations


def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 39
label: 0
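Calling the `mean` helper above directly, from the same module, looks like this:

print(mean([3, 6, 9, 12, 15, 18, 21]))  # 12.0
# mean([]) raises ValueError("List is empty")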
code:

# transformers research example -- PyTorch Lightning callbacks for RAG seq2seq training (callbacks_rag.py)
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        # Log the learning rate of every optimizer param group.
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
code_codestyle: 715
style_context:

# Strings -- validate Sri Lankan mobile phone numbers with a regular expression
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"  # country prefix: 0, 94, +94, or 0094
        r"7(0|1|2|4|5|6|7|8)"     # mobile prefix 7 plus the carrier digit
        r"(-| |)"                 # optional separator: hyphen, space, or nothing
        r"\d{7}$"                 # seven-digit subscriber number
    )

    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"

    print(is_sri_lankan_phone_number(phone))
style_context_codestyle: 39
label: 0
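Worked examples for the validator above, checked by hand against its regex:

print(is_sri_lankan_phone_number("0094702343221"))  # True  -- 0094 prefix, carrier digit 0
print(is_sri_lankan_phone_number("+94773283048"))   # True  -- +94 prefix, carrier digit 7
print(is_sri_lankan_phone_number("0793283048"))     # False -- 9 is not an accepted carrier digit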
code:

# transformers -- Flax Blenderbot model tests (tests/models/blenderbot/test_modeling_flax_blenderbot.py)
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        # Verify that cached, step-by-step decoding matches a full decoder pass.
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
code_codestyle: 716
style_context:

# transformers -- tests for the test/tester mapping helpers in utils/get_test_info.py
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
39
0
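The Flax Blenderbot sample above compares jit-compiled and eager execution of the same encode/decode calls. A minimal, self-contained sketch of that pattern on a toy function; the function, shapes, and tolerance here are illustrative assumptions, not part of the original test:

import jax
import jax.numpy as jnp


@jax.jit
def toy_encode(x):
    # Stand-in for model.encode: any pure function of its inputs works here.
    return jnp.tanh(x @ x.T)


x = jnp.ones((2, 3))
jitted = toy_encode(x)
with jax.disable_jit():
    eager = toy_encode(x)

# The original test only compares output shapes; values should agree closely too.
assert jitted.shape == eager.shape
assert jnp.allclose(jitted, eager, atol=1e-6)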
'''simple docstring'''
import unittest

import numpy as np

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
    @property
    def UpperCAmelCase_ ( self ) -> str:
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def UpperCAmelCase_ ( self ) -> str:
        __lowerCAmelCase = ort.SessionOptions()
        __lowerCAmelCase = False
        return options

    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        __lowerCAmelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        __lowerCAmelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        __lowerCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting" ,
            revision="onnx" ,
            safety_checker=UpperCamelCase ,
            feature_extractor=UpperCamelCase ,
            provider=self.gpu_provider ,
            sess_options=self.gpu_options ,
        )
        pipe.set_progress_bar_config(disable=UpperCamelCase )

        __lowerCAmelCase = "A red cat sitting on a park bench"

        __lowerCAmelCase = np.random.RandomState(0 )
        __lowerCAmelCase = pipe(
            prompt=UpperCamelCase ,
            image=UpperCamelCase ,
            mask_image=UpperCamelCase ,
            guidance_scale=7.5 ,
            num_inference_steps=10 ,
            generator=UpperCamelCase ,
            output_type="np" ,
        )
        __lowerCAmelCase = output.images
        __lowerCAmelCase = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        __lowerCAmelCase = np.array([0.25_14, 0.30_07, 0.35_17, 0.17_90, 0.23_82, 0.31_67, 0.19_44, 0.22_73, 0.24_64] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def UpperCAmelCase_ ( self ) -> Dict:
        __lowerCAmelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        __lowerCAmelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        __lowerCAmelCase = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx"
        )
        __lowerCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting" ,
            revision="onnx" ,
            scheduler=UpperCamelCase ,
            safety_checker=UpperCamelCase ,
            feature_extractor=UpperCamelCase ,
            provider=self.gpu_provider ,
            sess_options=self.gpu_options ,
        )
        pipe.set_progress_bar_config(disable=UpperCamelCase )

        __lowerCAmelCase = "A red cat sitting on a park bench"

        __lowerCAmelCase = np.random.RandomState(0 )
        __lowerCAmelCase = pipe(
            prompt=UpperCamelCase ,
            image=UpperCamelCase ,
            mask_image=UpperCamelCase ,
            guidance_scale=7.5 ,
            num_inference_steps=20 ,
            generator=UpperCamelCase ,
            output_type="np" ,
        )
        __lowerCAmelCase = output.images
        __lowerCAmelCase = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        __lowerCAmelCase = np.array([0.00_86, 0.00_77, 0.00_83, 0.00_93, 0.01_07, 0.01_39, 0.00_94, 0.00_97, 0.01_25] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
717
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class UpperCAmelCase__ ( UpperCamelCase__ ):
    a : torch.FloatTensor


class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
    @register_to_config
    def __init__(
        self ,
        UpperCamelCase = 16 ,
        UpperCamelCase = 88 ,
        UpperCamelCase = None ,
        UpperCamelCase = None ,
        UpperCamelCase = 1 ,
        UpperCamelCase = 0.0 ,
        UpperCamelCase = 32 ,
        UpperCamelCase = None ,
        UpperCamelCase = False ,
        UpperCamelCase = None ,
        UpperCamelCase = "geglu" ,
        UpperCamelCase = True ,
        UpperCamelCase = True ,
    ) -> List[str]:
        super().__init__()
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = attention_head_dim
        __lowerCAmelCase = num_attention_heads * attention_head_dim

        __lowerCAmelCase = in_channels

        __lowerCAmelCase = torch.nn.GroupNorm(num_groups=UpperCamelCase , num_channels=UpperCamelCase , eps=1E-6 , affine=UpperCamelCase )
        __lowerCAmelCase = nn.Linear(UpperCamelCase , UpperCamelCase )

        # 3. Define transformers blocks
        __lowerCAmelCase = nn.ModuleList(
            [
                BasicTransformerBlock(
                    UpperCamelCase ,
                    UpperCamelCase ,
                    UpperCamelCase ,
                    dropout=UpperCamelCase ,
                    cross_attention_dim=UpperCamelCase ,
                    activation_fn=UpperCamelCase ,
                    attention_bias=UpperCamelCase ,
                    double_self_attention=UpperCamelCase ,
                    norm_elementwise_affine=UpperCamelCase ,
                )
                for d in range(UpperCamelCase )
            ]
        )

        __lowerCAmelCase = nn.Linear(UpperCamelCase , UpperCamelCase )

    def UpperCAmelCase_ (
        self ,
        UpperCamelCase ,
        UpperCamelCase=None ,
        UpperCamelCase=None ,
        UpperCamelCase=None ,
        UpperCamelCase=1 ,
        UpperCamelCase=None ,
        UpperCamelCase = True ,
    ) -> List[str]:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = hidden_states.shape
        __lowerCAmelCase = batch_frames // num_frames

        __lowerCAmelCase = hidden_states
        __lowerCAmelCase = hidden_states[None, :].reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
        __lowerCAmelCase = hidden_states.permute(0 , 2 , 1 , 3 , 4 )

        __lowerCAmelCase = self.norm(UpperCamelCase )
        __lowerCAmelCase = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , UpperCamelCase , UpperCamelCase )

        __lowerCAmelCase = self.proj_in(UpperCamelCase )

        # 2. Blocks
        for block in self.transformer_blocks:
            __lowerCAmelCase = block(
                UpperCamelCase ,
                encoder_hidden_states=UpperCamelCase ,
                timestep=UpperCamelCase ,
                cross_attention_kwargs=UpperCamelCase ,
                class_labels=UpperCamelCase ,
            )

        # 3. Output
        __lowerCAmelCase = self.proj_out(UpperCamelCase )
        __lowerCAmelCase = (
            hidden_states[None, None, :]
            .reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        __lowerCAmelCase = hidden_states.reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )

        __lowerCAmelCase = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=UpperCamelCase )
39
0
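The temporal transformer in the row above folds video frames into the batch dimension before attention and unfolds them afterwards. A small PyTorch sketch of that reshape round-trip, with descriptive shape names assumed purely for illustration:

import torch

batch_frames, channel, height, width = 8, 4, 16, 16
num_frames = 2
batch_size = batch_frames // num_frames

hidden_states = torch.randn(batch_frames, channel, height, width)

# (batch*frames, C, H, W) -> (batch, frames, C, H, W) -> (batch, C, frames, H, W)
x = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
x = x.permute(0, 2, 1, 3, 4)

# flatten the spatial grid so attention runs over the frame axis per pixel
x = x.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

# ... the attention blocks would run here ...

# invert the transformation exactly
x = (
    x[None, None, :]
    .reshape(batch_size, height, width, num_frames, channel)
    .permute(0, 3, 4, 1, 2)
    .contiguous()
)
x = x.reshape(batch_frames, channel, height, width)

assert torch.equal(x, hidden_states)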
'''simple docstring'''
from pathlib import Path

import numpy as np
from PIL import Image


def __lowerCAmelCase ( lowerCamelCase : np.ndarray ):
    '''simple docstring'''
    __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2_9_8_9 * r + 0.5_8_7_0 * g + 0.1_1_4_0 * b


def __lowerCAmelCase ( lowerCamelCase : np.ndarray ):
    '''simple docstring'''
    return (gray > 1_27) & (gray <= 2_55)


def __lowerCAmelCase ( lowerCamelCase : np.ndarray , lowerCamelCase : np.ndarray ):
    '''simple docstring'''
    __lowerCAmelCase = np.zeros_like(lowerCamelCase )
    __lowerCAmelCase = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    __lowerCAmelCase = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            __lowerCAmelCase = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            __lowerCAmelCase = int(summation > 0 )
    return output


if __name__ == "__main__":
    # read original image
    lowerCAmelCase : Union[str, Any] = Path(__file__).resolve().parent / '''image_data''' / '''lena.jpg'''
    lowerCAmelCase : Any = np.array(Image.open(lena_path))

    # kernel to be applied
    lowerCAmelCase : Union[str, Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    lowerCAmelCase : Union[str, Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    lowerCAmelCase : Any = Image.fromarray(output).convert('''RGB''')
    pil_img.save('''result_dilation.png''')
718
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def __lowerCAmelCase ( lowerCamelCase : bytes , lowerCamelCase : int ):
    '''simple docstring'''
    __lowerCAmelCase = f'''{sampling_rate}'''
    __lowerCAmelCase = "1"
    __lowerCAmelCase = "f32le"
    __lowerCAmelCase = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(lowerCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            __lowerCAmelCase = ffmpeg_process.communicate(lowerCamelCase )
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
    __lowerCAmelCase = output_stream[0]
    __lowerCAmelCase = np.frombuffer(lowerCamelCase , np.floataa )
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile" )
    return audio


def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : str = "f32le" , ):
    '''simple docstring'''
    __lowerCAmelCase = f'''{sampling_rate}'''
    __lowerCAmelCase = "1"
    if format_for_conversion == "s16le":
        __lowerCAmelCase = 2
    elif format_for_conversion == "f32le":
        __lowerCAmelCase = 4
    else:
        raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )

    __lowerCAmelCase = platform.system()
    if system == "Linux":
        __lowerCAmelCase = "alsa"
        __lowerCAmelCase = "default"
    elif system == "Darwin":
        __lowerCAmelCase = "avfoundation"
        __lowerCAmelCase = ":0"
    elif system == "Windows":
        __lowerCAmelCase = "dshow"
        __lowerCAmelCase = "default"

    __lowerCAmelCase = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    __lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    __lowerCAmelCase = _ffmpeg_stream(lowerCamelCase , lowerCamelCase )
    for item in iterator:
        yield item


def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Union[Tuple[float, float], float]] = None , lowerCamelCase : str = "f32le" , ):
    '''simple docstring'''
    if stream_chunk_s is not None:
        __lowerCAmelCase = stream_chunk_s
    else:
        __lowerCAmelCase = chunk_length_s

    __lowerCAmelCase = ffmpeg_microphone(lowerCamelCase , lowerCamelCase , format_for_conversion=lowerCamelCase )
    if format_for_conversion == "s16le":
        __lowerCAmelCase = np.intaa
        __lowerCAmelCase = 2
    elif format_for_conversion == "f32le":
        __lowerCAmelCase = np.floataa
        __lowerCAmelCase = 4
    else:
        raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )

    if stride_length_s is None:
        __lowerCAmelCase = chunk_length_s / 6
    __lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(lowerCamelCase , (int, float) ):
        __lowerCAmelCase = [stride_length_s, stride_length_s]

    __lowerCAmelCase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    __lowerCAmelCase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    __lowerCAmelCase = datetime.datetime.now()
    __lowerCAmelCase = datetime.timedelta(seconds=lowerCamelCase )
    for item in chunk_bytes_iter(lowerCamelCase , lowerCamelCase , stride=(stride_left, stride_right) , stream=lowerCamelCase ):
        # Put everything back in numpy scale
        __lowerCAmelCase = np.frombuffer(item["raw"] , dtype=lowerCamelCase )
        __lowerCAmelCase = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        __lowerCAmelCase = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def __lowerCAmelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Tuple[int, int] , lowerCamelCase : bool = False ):
    '''simple docstring'''
    __lowerCAmelCase = B""
    __lowerCAmelCase , __lowerCAmelCase = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}'''
        )
    __lowerCAmelCase = 0
    for raw in iterator:
        acc += raw
        if stream and len(lowerCamelCase ) < chunk_len:
            __lowerCAmelCase = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(lowerCamelCase ) >= chunk_len:
                # We are flushing the accumulator
                __lowerCAmelCase = (_stride_left, stride_right)
                __lowerCAmelCase = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    __lowerCAmelCase = False
                yield item
                __lowerCAmelCase = stride_left
                __lowerCAmelCase = acc[chunk_len - stride_left - stride_right :]

    # Last chunk
    if len(lowerCamelCase ) > stride_left:
        __lowerCAmelCase = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            __lowerCAmelCase = False
        yield item


def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int ):
    '''simple docstring'''
    __lowerCAmelCase = 2**24  # 16Mo
    try:
        with subprocess.Popen(lowerCamelCase , stdout=subprocess.PIPE , bufsize=lowerCamelCase ) as ffmpeg_process:
            while True:
                __lowerCAmelCase = ffmpeg_process.stdout.read(lowerCamelCase )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
39
0
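The morphological dilation sample above uses obfuscated identifiers. A tiny deobfuscated sketch plus a worked example; the helper name `dilate` and the np.pad-based padding are assumptions made for illustration only:

import numpy as np


def dilate(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    # Pad so every pixel gets a full kernel-sized window.
    pad_y, pad_x = kernel.shape[0] // 2, kernel.shape[1] // 2
    padded = np.pad(image, ((pad_y, pad_y), (pad_x, pad_x)))
    out = np.zeros_like(image)
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            window = padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            # A pixel turns on if the kernel overlaps any foreground pixel.
            out[y, x] = int((kernel * window).sum() > 0)
    return out


image = np.array([[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]])
cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
print(dilate(image, cross))
# The single foreground pixel grows into a cross-shaped neighbourhood.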
'''simple docstring'''
from __future__ import annotations

from collections import deque


class UpperCAmelCase__ :
    def __init__( self , UpperCamelCase ) -> Optional[int]:
        __lowerCAmelCase = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(UpperCamelCase )
        self.set_fail_transitions()

    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def UpperCAmelCase_ ( self , UpperCamelCase ) -> None:
        __lowerCAmelCase = 0
        for character in keyword:
            __lowerCAmelCase = self.find_next_state(UpperCamelCase , UpperCamelCase )
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                __lowerCAmelCase = len(self.adlist ) - 1
            else:
                __lowerCAmelCase = next_state
        self.adlist[current_state]["output"].append(UpperCamelCase )

    def UpperCAmelCase_ ( self ) -> None:
        __lowerCAmelCase = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(UpperCamelCase )
            __lowerCAmelCase = 0
        while q:
            __lowerCAmelCase = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(UpperCamelCase )
                __lowerCAmelCase = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(UpperCamelCase , self.adlist[child]["value"] ) is None
                    and state != 0
                ):
                    __lowerCAmelCase = self.adlist[state]["fail_state"]
                __lowerCAmelCase = self.find_next_state(
                    UpperCamelCase , self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    __lowerCAmelCase = 0
                __lowerCAmelCase = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def UpperCAmelCase_ ( self , UpperCamelCase ) -> dict[str, list[int]]:
        __lowerCAmelCase = {}  # returns a dict with keywords and list of its occurrences
        __lowerCAmelCase = 0
        for i in range(len(UpperCamelCase ) ):
            while (
                self.find_next_state(UpperCamelCase , string[i] ) is None
                and current_state != 0
            ):
                __lowerCAmelCase = self.adlist[current_state]["fail_state"]
            __lowerCAmelCase = self.find_next_state(UpperCamelCase , string[i] )
            if next_state is None:
                __lowerCAmelCase = 0
            else:
                __lowerCAmelCase = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        __lowerCAmelCase = []
                    result[key].append(i - len(UpperCamelCase ) + 1 )
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
719
'''simple docstring'''
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def __lowerCAmelCase ( lowerCamelCase : List[str] ):
    '''simple docstring'''
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )


class UpperCAmelCase__ ( UpperCamelCase__ ):
    @staticmethod
    def UpperCAmelCase_ ( UpperCamelCase ) -> Tuple:
        __lowerCAmelCase = parser.add_parser("download" )
        download_parser.add_argument(
            "--cache-dir" , type=UpperCamelCase , default=UpperCamelCase , help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force" , action="store_true" , help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code" ,
            action="store_true" ,
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" ,
        )
        download_parser.add_argument("model" , type=UpperCamelCase , help="Name of the model to download" )
        download_parser.set_defaults(func=UpperCamelCase )

    def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]:
        __lowerCAmelCase = model
        __lowerCAmelCase = cache
        __lowerCAmelCase = force
        __lowerCAmelCase = trust_remote_code

    def UpperCAmelCase_ ( self ) -> Any:
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code
        )
39
0
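The class in the row above is an Aho-Corasick automaton whose method names are obfuscated. As a reference for the output format its search method is meant to produce (a dict mapping each matched keyword to the start indices of its occurrences), here is a naive standalone cross-check; the function name and sample text are illustrative assumptions:

def naive_search(text: str, keywords: list[str]) -> dict[str, list[int]]:
    # Brute-force substring search, quadratic but handy as a test oracle.
    result: dict[str, list[int]] = {}
    for keyword in keywords:
        start = text.find(keyword)
        while start != -1:
            result.setdefault(keyword, []).append(start)
            start = text.find(keyword, start + 1)
    return result


print(naive_search("whatever, err ... , wherever", ["what", "hat", "ver", "er", "sorry"]))
# {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}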
'''simple docstring'''
def __lowerCAmelCase ( lowerCamelCase : list[int] ):
    '''simple docstring'''
    if not numbers:
        return 0

    if not isinstance(lowerCamelCase , (list, tuple) ) or not all(
        isinstance(lowerCamelCase , lowerCamelCase ) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers" )

    __lowerCAmelCase = __lowerCAmelCase = __lowerCAmelCase = numbers[0]

    for i in range(1 , len(lowerCamelCase ) ):
        # update the maximum and minimum subarray products
        __lowerCAmelCase = numbers[i]
        if number < 0:
            __lowerCAmelCase , __lowerCAmelCase = min_till_now, max_till_now
        __lowerCAmelCase = max(lowerCamelCase , max_till_now * number )
        __lowerCAmelCase = min(lowerCamelCase , min_till_now * number )

        # update the maximum product found till now
        __lowerCAmelCase = max(lowerCamelCase , lowerCamelCase )

    return max_prod
720
'''simple docstring'''
def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ):
    '''simple docstring'''
    __lowerCAmelCase = 1
    __lowerCAmelCase = 2
    while i * i <= n:
        __lowerCAmelCase = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def __lowerCAmelCase ( ):
    '''simple docstring'''
    __lowerCAmelCase = 1
    __lowerCAmelCase = 1

    while True:
        i += 1
        t_num += i

        if count_divisors(lowerCamelCase ) > 5_00:
            break

    return t_num


if __name__ == "__main__":
    print(solution())
39
0
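Quick sanity checks for the maximum-subarray-product sample in the row above, rewritten with descriptive stand-in names (the original uses obfuscated identifiers):

def max_subarray_product(numbers: list[int]) -> int:
    # Kadane-style scan tracking both max and min running products,
    # since multiplying by a negative flips minimum into maximum.
    max_till_now = min_till_now = max_prod = numbers[0]
    for number in numbers[1:]:
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        max_prod = max(max_prod, max_till_now)
    return max_prod


assert max_subarray_product([2, 3, -2, 4]) == 6    # best subarray is [2, 3]
assert max_subarray_product([-2, -3, 4]) == 24     # the sign swap pays off

The divisor-counting sample in the same row can be checked by hand: 28 = 2² · 7 has (2 + 1)(1 + 1) = 6 divisors, and 28 is the first triangular number with more than five divisors; its solution() searches for the first with more than 500.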
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


lowerCAmelCase : List[Any] = {
    '''configuration_longformer''': [
        '''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''LongformerConfig''',
        '''LongformerOnnxConfig''',
    ],
    '''tokenization_longformer''': ['''LongformerTokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : Any = ['''LongformerTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : List[Any] = [
        '''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''LongformerForMaskedLM''',
        '''LongformerForMultipleChoice''',
        '''LongformerForQuestionAnswering''',
        '''LongformerForSequenceClassification''',
        '''LongformerForTokenClassification''',
        '''LongformerModel''',
        '''LongformerPreTrainedModel''',
        '''LongformerSelfAttention''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : Optional[int] = [
        '''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFLongformerForMaskedLM''',
        '''TFLongformerForMultipleChoice''',
        '''TFLongformerForQuestionAnswering''',
        '''TFLongformerForSequenceClassification''',
        '''TFLongformerForTokenClassification''',
        '''TFLongformerModel''',
        '''TFLongformerPreTrainedModel''',
        '''TFLongformerSelfAttention''',
    ]


if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
721
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)

lowerCAmelCase : Optional[int] = {
    '''facebook/dpr-ctx_encoder-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-question_encoder-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-reader-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-ctx_encoder-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-question_encoder-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-reader-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
    ),
}


class UpperCAmelCase__ ( UpperCamelCase__ ):
    a : Optional[Any] = """dpr"""

    def __init__(
        self ,
        UpperCamelCase=3_0522 ,
        UpperCamelCase=768 ,
        UpperCamelCase=12 ,
        UpperCamelCase=12 ,
        UpperCamelCase=3072 ,
        UpperCamelCase="gelu" ,
        UpperCamelCase=0.1 ,
        UpperCamelCase=0.1 ,
        UpperCamelCase=512 ,
        UpperCamelCase=2 ,
        UpperCamelCase=0.02 ,
        UpperCamelCase=1E-12 ,
        UpperCamelCase=0 ,
        UpperCamelCase="absolute" ,
        UpperCamelCase = 0 ,
        **UpperCamelCase ,
    ) -> Tuple:
        super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase )

        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = type_vocab_size
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = layer_norm_eps
        __lowerCAmelCase = projection_dim
        __lowerCAmelCase = position_embedding_type
39
0
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ): a : Dict = XGLMTokenizer a : str = XGLMTokenizerFast a : int = True a : Optional[Any] = True def UpperCAmelCase_ ( self ) -> List[str]: super().setUp() # We have a SentencePiece fixture for testing __lowerCAmelCase = XGLMTokenizer(UpperCamelCase , keep_accents=UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase_ ( self ) -> str: __lowerCAmelCase = "<pad>" __lowerCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(len(UpperCamelCase ) , 1008 ) def UpperCAmelCase_ ( self ) -> Optional[int]: self.assertEqual(self.get_tokenizer().vocab_size , 1008 ) def UpperCAmelCase_ ( self ) -> List[str]: __lowerCAmelCase = XGLMTokenizer(UpperCamelCase , keep_accents=UpperCamelCase ) __lowerCAmelCase = tokenizer.tokenize("This is a test" ) self.assertListEqual(UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __lowerCAmelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( UpperCamelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) __lowerCAmelCase = tokenizer.convert_tokens_to_ids(UpperCamelCase ) self.assertListEqual( UpperCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual( UpperCamelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def UpperCAmelCase_ ( self ) -> List[str]: return XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) def UpperCAmelCase_ ( self ) -> str: with tempfile.NamedTemporaryFile() as f: shutil.copyfile(UpperCamelCase , f.name ) __lowerCAmelCase = XGLMTokenizer(f.name , keep_accents=UpperCamelCase ) __lowerCAmelCase = pickle.dumps(UpperCamelCase ) pickle.loads(UpperCamelCase ) def UpperCAmelCase_ ( self ) -> List[str]: if not self.test_rust_tokenizer: return __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = tokenizer.tokenize(UpperCamelCase ) __lowerCAmelCase = rust_tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) __lowerCAmelCase = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = tokenizer.encode(UpperCamelCase ) __lowerCAmelCase = rust_tokenizer.encode(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) @slow def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = "Hello World!" __lowerCAmelCase = [2, 3_1227, 4447, 35] self.assertListEqual(UpperCamelCase , self.big_tokenizer.encode(UpperCamelCase ) ) @slow def UpperCAmelCase_ ( self ) -> Tuple: __lowerCAmelCase = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) # fmt: off __lowerCAmelCase = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735] # fmt: on self.assertListEqual(UpperCamelCase , self.big_tokenizer.encode(UpperCamelCase ) ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: # fmt: off __lowerCAmelCase = { "input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase , model_name="facebook/xglm-564M" , padding=UpperCamelCase , )
700
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase : Union[str, Any] = { '''configuration_distilbert''': [ '''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DistilBertConfig''', '''DistilBertOnnxConfig''', ], '''tokenization_distilbert''': ['''DistilBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple = ['''DistilBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple = [ '''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DistilBertForMaskedLM''', '''DistilBertForMultipleChoice''', '''DistilBertForQuestionAnswering''', '''DistilBertForSequenceClassification''', '''DistilBertForTokenClassification''', '''DistilBertModel''', '''DistilBertPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[str] = [ '''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDistilBertForMaskedLM''', '''TFDistilBertForMultipleChoice''', '''TFDistilBertForQuestionAnswering''', '''TFDistilBertForSequenceClassification''', '''TFDistilBertForTokenClassification''', '''TFDistilBertMainLayer''', '''TFDistilBertModel''', '''TFDistilBertPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[int] = [ '''FlaxDistilBertForMaskedLM''', '''FlaxDistilBertForMultipleChoice''', '''FlaxDistilBertForQuestionAnswering''', '''FlaxDistilBertForSequenceClassification''', '''FlaxDistilBertForTokenClassification''', '''FlaxDistilBertModel''', '''FlaxDistilBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, 
FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
39
0
import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) lowerCAmelCase : List[str] = logging.getLogger() def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("-f" ) __lowerCAmelCase = parser.parse_args() return args.f def __lowerCAmelCase ( lowerCamelCase : str ): '''simple docstring''' __lowerCAmelCase = {} __lowerCAmelCase = os.path.join(lowerCamelCase , "all_results.json" ) if os.path.exists(lowerCamelCase ): with open(lowerCamelCase , "r" ) as f: __lowerCAmelCase = json.load(lowerCamelCase ) else: raise ValueError(f'''can\'t find {path}''' ) return results def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = torch.cuda.is_available() and torch_device == "cuda" return is_using_cuda and is_apex_available() lowerCAmelCase : Dict = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class UpperCAmelCase__ ( UpperCamelCase__ ): @classmethod def UpperCAmelCase_ ( cls ) -> str: # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU __lowerCAmelCase = tempfile.mkdtemp() __lowerCAmelCase = os.path.join(cls.tmpdir , "default_config.yml" ) write_basic_config(save_location=cls.configPath ) __lowerCAmelCase = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def UpperCAmelCase_ ( cls ) -> List[Any]: shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def UpperCAmelCase_ ( self ) -> str: __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = F''' {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking '''.split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) __lowerCAmelCase = get_results(UpperCamelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , "glue_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def UpperCAmelCase_ ( self ) -> str: __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = F''' {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking '''.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) __lowerCAmelCase = get_results(UpperCamelCase ) self.assertLess(result["perplexity"] , 100 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , "clm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = F''' {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __lowerCAmelCase = get_results(UpperCamelCase ) self.assertLess(result["perplexity"] , 42 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , "mlm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def UpperCAmelCase_ ( self ) -> str: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu __lowerCAmelCase = 7 if get_gpu_count() > 1 else 2 __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = F''' {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __lowerCAmelCase = get_results(UpperCamelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertLess(result["train_loss"] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , "ner_no_trainer" ) ) ) @unittest.skip(reason="Fix me @muellerzr" ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = F''' {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __lowerCAmelCase = get_results(UpperCamelCase ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["eval_f1"] , 28 ) self.assertGreaterEqual(result["eval_exact"] , 28 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , "qa_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = F''' {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking '''.split() run_command(self._launch_args + testargs ) __lowerCAmelCase = get_results(UpperCamelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , "swag_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = F''' {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __lowerCAmelCase = get_results(UpperCamelCase ) self.assertGreaterEqual(result["eval_rouge1"] , 10 ) self.assertGreaterEqual(result["eval_rouge2"] , 2 ) self.assertGreaterEqual(result["eval_rougeL"] , 7 ) self.assertGreaterEqual(result["eval_rougeLsum"] , 7 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , "summarization_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = F''' {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __lowerCAmelCase = get_results(UpperCamelCase ) self.assertGreaterEqual(result["eval_bleu"] , 30 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , "translation_no_trainer" ) ) ) @slow def UpperCAmelCase_ ( self ) -> Any: __lowerCAmelCase = logging.StreamHandler(sys.stdout ) logger.addHandler(UpperCamelCase ) __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = F''' {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name 
huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch '''.split() run_command(self._launch_args + testargs ) __lowerCAmelCase = get_results(UpperCamelCase ) self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = F''' {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 '''.split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) __lowerCAmelCase = get_results(UpperCamelCase ) # The base model scores a 25% self.assertGreaterEqual(result["eval_accuracy"] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , "step_1" ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , "image_classification_no_trainer" ) ) )
701
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] ): '''simple docstring''' assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __lowerCAmelCase ( lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Dict ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_3": "float64", "col_1": "string", "col_2": "int64"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Any ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_3": "float64", "col_1": "string", "col_2": "int64"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowerCAmelCase = {"col_2": "int64", "col_3": "float64", "col_1": "string"} __lowerCAmelCase = features.copy() __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = tmp_path / "cache" 
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , split=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] ): '''simple docstring''' if issubclass(lowerCamelCase , lowerCamelCase ): __lowerCAmelCase = jsonl_path elif issubclass(lowerCamelCase , lowerCamelCase ): __lowerCAmelCase = [jsonl_path] __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : str=("train",) ): '''simple docstring''' assert isinstance(lowerCamelCase , lowerCamelCase ) for split in splits: __lowerCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __lowerCAmelCase ( lowerCamelCase : Dict , lowerCamelCase : int , lowerCamelCase : List[str] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read() _check_json_datasetdict(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Dict ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_datasetdict(lowerCamelCase , lowerCamelCase ) 
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : int ): '''simple docstring''' if split: __lowerCAmelCase = {split: jsonl_path} else: __lowerCAmelCase = "train" __lowerCAmelCase = {"train": jsonl_path, "test": jsonl_path} __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_datasetdict(lowerCamelCase , lowerCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __lowerCAmelCase ( lowerCamelCase : Optional[Any] ): '''simple docstring''' return json.load(lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : Tuple ): '''simple docstring''' return [json.loads(lowerCamelCase ) for line in buffer] class UpperCAmelCase__ : @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase ).write() buffer.seek(0 ) __lowerCAmelCase = load_json_function(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) assert isinstance(exported_content[0] , UpperCamelCase ) assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase ).write() buffer.seek(0 ) __lowerCAmelCase = load_json(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , num_proc=2 ).write() buffer.seek(0 ) __lowerCAmelCase = load_json_function(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) assert isinstance(exported_content[0] , UpperCamelCase ) assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> 
Union[str, Any]: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase , num_proc=2 ).write() buffer.seek(0 ) __lowerCAmelCase = load_json(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase ) == 10 def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any: with pytest.raises(UpperCamelCase ): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , num_proc=0 ) @pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple: __lowerCAmelCase = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}''' __lowerCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(UpperCamelCase , UpperCamelCase , compression=UpperCamelCase ).write() with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f: __lowerCAmelCase = f.read() with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f: __lowerCAmelCase = f.read() assert exported_content == original_content
39
0
'''simple docstring'''
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes <= num."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
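For reference, the cost of the sieve above: marking the multiples of each prime p <= sqrt(n) starting at p^2 takes about n/p updates, so the total work is

\[
\sum_{p \le \sqrt{n}} \frac{n}{p} \;=\; n \sum_{p \le \sqrt{n}} \frac{1}{p} \;=\; O(n \log \log n),
\]

by Mertens' second theorem, with O(n) memory for the boolean sieve.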
702
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) lowerCAmelCase : Optional[Any] = { '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''], '''processing_trocr''': ['''TrOCRProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[str] = [ '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TrOCRForCausalLM''', '''TrOCRPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
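The _LazyModule indirection above defers the submodule imports until an attribute is first touched. A simplified sketch of the same idea using PEP 562 module-level __getattr__ (an illustration of the pattern only, not transformers' actual _LazyModule implementation):

import importlib

_import_structure = {"processing_trocr": ["TrOCRProcessor"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Called only when `name` is not already in the module's namespace,
    # so the heavy submodule import happens on first access.
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)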
39
0
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) lowerCAmelCase : Union[str, Any] = { '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''', } class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = """layoutlmv3""" def __init__( self , UpperCamelCase=5_0265 , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=12 , UpperCamelCase=3072 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=1E-5 , UpperCamelCase=1 , UpperCamelCase=0 , UpperCamelCase=2 , UpperCamelCase=1024 , UpperCamelCase=128 , UpperCamelCase=128 , UpperCamelCase=True , UpperCamelCase=32 , UpperCamelCase=128 , UpperCamelCase=64 , UpperCamelCase=256 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=224 , UpperCamelCase=3 , UpperCamelCase=16 , UpperCamelCase=None , **UpperCamelCase , ) -> Optional[int]: super().__init__( vocab_size=UpperCamelCase , hidden_size=UpperCamelCase , num_hidden_layers=UpperCamelCase , num_attention_heads=UpperCamelCase , intermediate_size=UpperCamelCase , hidden_act=UpperCamelCase , hidden_dropout_prob=UpperCamelCase , attention_probs_dropout_prob=UpperCamelCase , max_position_embeddings=UpperCamelCase , type_vocab_size=UpperCamelCase , initializer_range=UpperCamelCase , layer_norm_eps=UpperCamelCase , pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase , ) __lowerCAmelCase = max_ad_position_embeddings __lowerCAmelCase = coordinate_size __lowerCAmelCase = shape_size __lowerCAmelCase = has_relative_attention_bias __lowerCAmelCase = rel_pos_bins __lowerCAmelCase = max_rel_pos __lowerCAmelCase = has_spatial_attention_bias __lowerCAmelCase = rel_ad_pos_bins __lowerCAmelCase = max_rel_ad_pos __lowerCAmelCase = text_embed __lowerCAmelCase = visual_embed __lowerCAmelCase = input_size __lowerCAmelCase = num_channels __lowerCAmelCase = patch_size __lowerCAmelCase = classifier_dropout class UpperCAmelCase__ ( UpperCamelCase__ ): a : List[Any] = version.parse("""1.12""" ) @property def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"}), ("bbox", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) else: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("bbox", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels"}), ] ) @property def UpperCAmelCase_ ( self ) -> float: return 1E-5 @property def UpperCAmelCase_ ( self ) -> int: return 12 def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = -1 , UpperCamelCase = -1 , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = 3 , UpperCamelCase = 40 , UpperCamelCase = 40 , ) 
-> Mapping[str, Any]: setattr(processor.image_processor , "apply_ocr" , UpperCamelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __lowerCAmelCase = compute_effective_axis_dimension( UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __lowerCAmelCase = processor.tokenizer.num_special_tokens_to_add(UpperCamelCase ) __lowerCAmelCase = compute_effective_axis_dimension( UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase ) # Generate dummy inputs according to compute batch and sequence __lowerCAmelCase = [[" ".join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes __lowerCAmelCase = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) __lowerCAmelCase = self._generate_dummy_images(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = dict( processor( UpperCamelCase , text=UpperCamelCase , boxes=UpperCamelCase , return_tensors=UpperCamelCase , ) ) return inputs
703
'''simple docstring''' import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class UpperCAmelCase__ ( UpperCamelCase__ ): a : List[str] = (CMStochasticIterativeScheduler,) a : str = 1_0 def UpperCAmelCase_ ( self , **UpperCamelCase ) -> str: __lowerCAmelCase = { "num_train_timesteps": 201, "sigma_min": 0.0_02, "sigma_max": 80.0, } config.update(**UpperCamelCase ) return config def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase = 10 __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = self.scheduler_classes[0](**UpperCamelCase ) scheduler.set_timesteps(UpperCamelCase ) __lowerCAmelCase = scheduler.timesteps[0] __lowerCAmelCase = scheduler.timesteps[1] __lowerCAmelCase = self.dummy_sample __lowerCAmelCase = 0.1 * sample __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCAmelCase_ ( self ) -> Any: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=UpperCamelCase ) def UpperCAmelCase_ ( self ) -> List[str]: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = 1 scheduler.set_timesteps(UpperCamelCase ) __lowerCAmelCase = scheduler.timesteps __lowerCAmelCase = torch.manual_seed(0 ) __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(UpperCamelCase ): # 1. scale model input __lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase ) # 2. predict noise residual __lowerCAmelCase = model(UpperCamelCase , UpperCamelCase ) # 3. predict previous sample x_t-1 __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample __lowerCAmelCase = pred_prev_sample __lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) ) __lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2 assert abs(result_mean.item() - 0.25_10 ) < 1E-3 def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = [106, 0] scheduler.set_timesteps(timesteps=UpperCamelCase ) __lowerCAmelCase = scheduler.timesteps __lowerCAmelCase = torch.manual_seed(0 ) __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input __lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase ) # 2. predict noise residual __lowerCAmelCase = model(UpperCamelCase , UpperCamelCase ) # 3. 
predict previous sample x_t-1 __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample __lowerCAmelCase = pred_prev_sample __lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) ) __lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2 assert abs(result_mean.item() - 0.45_27 ) < 1E-3 def UpperCAmelCase_ ( self ) -> Any: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = [39, 30, 12, 15, 0] with self.assertRaises(UpperCamelCase , msg="`timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = [39, 30, 12, 1, 0] __lowerCAmelCase = len(UpperCamelCase ) with self.assertRaises(UpperCamelCase , msg="Can only pass one of `num_inference_steps` or `timesteps`." ): scheduler.set_timesteps(num_inference_steps=UpperCamelCase , timesteps=UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Tuple: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCamelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=UpperCamelCase )
39
0
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class UpperCAmelCase__ ( unittest.TestCase ): def UpperCAmelCase_ ( self ) -> int: __lowerCAmelCase = tf.convert_to_tensor( [ [ 8.2_22_09_91, # 3rd highest value; idx. 0 -0.5_62_00_44, 5.23_22_97_52, 4.0_38_63_93, -6.8_79_83_78, -0.54_78_58_02, -3.2_01_21_53, 2.92_77_71_76, 1.88_17_19_53, 7.35_34_12_76, # 5th highest value; idx. 9 8.43_20_78_33, # 2nd highest value; idx. 10 -9.85_71_18_36, -5.96_20_92_36, -1.13_03_91_61, -7.1_11_52_94, -0.8_36_96_33, -5.3_18_64_08, 7.06_42_74_07, 0.81_36_93_44, -0.82_02_38_17, -5.9_17_97_96, 0.58_81_34_43, -6.99_77_84_38, 4.71_55_11_89, -0.18_77_16_37, 7.44_02_07_59, # 4th highest value; idx. 25 9.38_45_09_87, # 1st highest value; idx. 26 2.12_66_29_41, -9.32_56_20_38, 2.35_65_25_22, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58_42_55_18, 4.53_13_92_38, -5.57_51_04_64, -6.28_03_06_99, -7.19_52_95_03, -4.02_12_25_51, 1.39_33_70_37, -6.06_70_70_57, 1.59_48_05_17, -9.64_31_19, 0.03_90_77_99, 0.67_23_17_62, -8.88_20_67_26, 6.27_11_59_22, # 4th highest value; idx. 13 2.28_52_07_23, 4.82_76_75_06, 4.30_42_13_68, 8.8_27_53_13, # 2nd highest value; idx. 17 5.44_02_99_58, # 5th highest value; idx. 18 -4.4_73_57_94, 7.38_57_95_36, # 3rd highest value; idx. 20 -2.91_05_16_63, 2.61_94_60_77, -2.5_67_47_62, -9.48_95_93_02, -4.02_92_26_45, -1.35_41_69_18, 9.67_70_23_23, # 1st highest value; idx. 
27 -5.89_47_85_53, 1.85_37_04_67, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) __lowerCAmelCase = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above __lowerCAmelCase = tf.convert_to_tensor( [8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above __lowerCAmelCase = tf_top_k_top_p_filtering(UpperCamelCase , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) __lowerCAmelCase = output[output != -float("inf" )] __lowerCAmelCase = tf.cast( tf.where(tf.not_equal(UpperCamelCase , tf.constant(-float("inf" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(UpperCamelCase , UpperCamelCase , rtol=1E-12 ) tf.debugging.assert_equal(UpperCamelCase , UpperCamelCase ) @require_tf class UpperCAmelCase__ ( unittest.TestCase , UpperCamelCase__ ): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): a : Dict = { """AutoModelForCausalLM""": TFAutoModelForCausalLM, """AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq, """AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM, """AutoModelForVision2Seq""": TFAutoModelForVisionaSeq, """LogitsProcessorList""": TFLogitsProcessorList, """MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor, """create_tensor_fn""": tf.convert_to_tensor, """floats_tensor""": floats_tensor, """return_tensors""": """tf""", } @slow def UpperCAmelCase_ ( self ) -> Any: # TF-only test: tf.saved_model export __lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) __lowerCAmelCase = 2 __lowerCAmelCase = 2 class UpperCAmelCase__ ( tf.Module ): def __init__( self , UpperCamelCase ) -> Tuple: super(UpperCamelCase , self ).__init__() __lowerCAmelCase = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids" ), tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask" ), ) , jit_compile=UpperCamelCase , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> Optional[Any]: __lowerCAmelCase = self.model.generate( input_ids=UpperCamelCase , attention_mask=UpperCamelCase , max_new_tokens=UpperCamelCase , return_dict_in_generate=UpperCamelCase , ) return {"sequences": outputs["sequences"]} __lowerCAmelCase = [[2, 0], [102, 103]] __lowerCAmelCase = [[1, 0], [1, 1]] __lowerCAmelCase = DummyModel(model=UpperCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCamelCase , UpperCamelCase , signatures={"serving_default": dummy_model.serving} ) __lowerCAmelCase = tf.saved_model.load(UpperCamelCase ).signatures["serving_default"] for batch_size in range(1 , len(UpperCamelCase ) + 1 ): __lowerCAmelCase = { "input_ids": tf.constant(dummy_input_ids[:batch_size] ), "attention_mask": tf.constant(dummy_attention_masks[:batch_size] ), } __lowerCAmelCase = serving_func(**UpperCamelCase )["sequences"] __lowerCAmelCase = test_model.generate(**UpperCamelCase , max_new_tokens=UpperCamelCase ) tf.debugging.assert_equal(UpperCamelCase , UpperCamelCase ) @slow def UpperCAmelCase_ ( self ) -> Optional[Any]: # TF-only test: tf.saved_model export __lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) __lowerCAmelCase = 1 __lowerCAmelCase = 2 class UpperCAmelCase__ ( 
tf.Module ): def __init__( self , UpperCamelCase ) -> Optional[int]: super(UpperCamelCase , self ).__init__() __lowerCAmelCase = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids" ), tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask" ), ) , jit_compile=UpperCamelCase , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> str: __lowerCAmelCase = self.model.generate( input_ids=UpperCamelCase , attention_mask=UpperCamelCase , max_new_tokens=UpperCamelCase , return_dict_in_generate=UpperCamelCase , ) return {"sequences": outputs["sequences"]} __lowerCAmelCase = [[2], [102, 103]] __lowerCAmelCase = [[1], [1, 1]] __lowerCAmelCase = DummyModel(model=UpperCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCamelCase , UpperCamelCase , signatures={"serving_default": dummy_model.serving} ) __lowerCAmelCase = tf.saved_model.load(UpperCamelCase ).signatures["serving_default"] for input_row in range(len(UpperCamelCase ) ): __lowerCAmelCase = { "input_ids": tf.constant([dummy_input_ids[input_row]] ), "attention_mask": tf.constant([dummy_attention_masks[input_row]] ), } __lowerCAmelCase = serving_func(**UpperCamelCase )["sequences"] __lowerCAmelCase = test_model.generate(**UpperCamelCase , max_new_tokens=UpperCamelCase ) tf.debugging.assert_equal(UpperCamelCase , UpperCamelCase ) @slow @require_tensorflow_text def UpperCAmelCase_ ( self ) -> Union[str, Any]: # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=UpperCamelCase ) class UpperCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self ) -> List[Any]: super().__init__() __lowerCAmelCase = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(UpperCamelCase , "spiece.model" ) , "rb" ).read() ) __lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" ) def UpperCAmelCase_ ( self , UpperCamelCase , *UpperCamelCase , **UpperCamelCase ) -> Union[str, Any]: __lowerCAmelCase = self.tokenizer.tokenize(UpperCamelCase ) __lowerCAmelCase , __lowerCAmelCase = text.pad_model_inputs( UpperCamelCase , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) __lowerCAmelCase = self.model.generate(input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) return self.tokenizer.detokenize(UpperCamelCase ) __lowerCAmelCase = CompleteSentenceTransformer() __lowerCAmelCase = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" ) __lowerCAmelCase = complete_model(UpperCamelCase ) __lowerCAmelCase = tf.keras.Model(UpperCamelCase , UpperCamelCase ) keras_model.save(UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[int]: # Has PT equivalent: this test relies on random sampling __lowerCAmelCase = { "do_sample": True, "num_beams": 1, "top_p": 0.7, "top_k": 10, "temperature": 0.7, } __lowerCAmelCase = 14 __lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) __lowerCAmelCase = "Hello, my dog is cute and" __lowerCAmelCase = tokenizer(UpperCamelCase , return_tensors="tf" ) __lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) __lowerCAmelCase = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(":/CPU:0" ): tf.random.set_seed(0 ) __lowerCAmelCase = model.generate(**UpperCamelCase , eos_token_id=UpperCamelCase , 
**UpperCamelCase ) self.assertTrue(expectation == len(generated_tokens[0] ) ) __lowerCAmelCase = [638, 198] with tf.device(":/CPU:0" ): tf.random.set_seed(0 ) __lowerCAmelCase = model.generate(**UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def UpperCAmelCase_ ( self ) -> Any: # Has PT equivalent: ample use of framework-specific code __lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" ) __lowerCAmelCase = "Hugging Face is a technology company based in New York and Paris." __lowerCAmelCase = bart_tokenizer(UpperCamelCase , return_tensors="tf" ).input_ids __lowerCAmelCase = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" ) __lowerCAmelCase = bart_model.generate(UpperCamelCase ).numpy() class UpperCAmelCase__ ( UpperCamelCase__ ): def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=None , **UpperCamelCase ) -> Any: return super().call(UpperCamelCase , **UpperCamelCase ) __lowerCAmelCase = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" ) __lowerCAmelCase = bart_model.generate(UpperCamelCase , foo="bar" ).numpy() self.assertTrue(np.array_equal(UpperCamelCase , UpperCamelCase ) ) class UpperCAmelCase__ ( bart_model.model.encoder.__class__ ): def UpperCAmelCase_ ( self , UpperCamelCase , **UpperCamelCase ) -> List[str]: return super().call(UpperCamelCase , **UpperCamelCase ) __lowerCAmelCase = FakeEncoder(bart_model.config , bart_model.model.shared ) __lowerCAmelCase = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) __lowerCAmelCase = bart_model.generate(UpperCamelCase ).numpy() with self.assertRaises(UpperCamelCase ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(UpperCamelCase , foo="bar" )
704
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def world_covidaa_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the headline COVID-19 counters from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covidaa_stats().items():
        print(f"{key}\n{value}\n")
39
0
'''simple docstring''' import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class UpperCAmelCase__ : def __init__( self , UpperCamelCase , UpperCamelCase=99 , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=9 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase=8 , UpperCamelCase=0.1 , UpperCamelCase=0.0_02 , UpperCamelCase=1 , UpperCamelCase=0 , UpperCamelCase=0 , UpperCamelCase=None , UpperCamelCase=None , ) -> Any: __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = encoder_seq_length __lowerCAmelCase = decoder_seq_length # For common tests __lowerCAmelCase = self.decoder_seq_length __lowerCAmelCase = is_training __lowerCAmelCase = use_attention_mask __lowerCAmelCase = use_labels __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = d_ff __lowerCAmelCase = relative_attention_num_buckets __lowerCAmelCase = dropout_rate __lowerCAmelCase = initializer_factor __lowerCAmelCase = eos_token_id __lowerCAmelCase = pad_token_id __lowerCAmelCase = decoder_start_token_id __lowerCAmelCase = None __lowerCAmelCase = decoder_layers def UpperCAmelCase_ ( self ) -> Optional[Any]: return TaConfig.from_pretrained("google/umt5-base" ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , ) -> List[Any]: if attention_mask is None: __lowerCAmelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __lowerCAmelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __lowerCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCamelCase ) if decoder_head_mask is None: __lowerCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase ) if cross_attn_head_mask is None: __lowerCAmelCase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) __lowerCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect 
seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __lowerCAmelCase = input_ids.clamp(self.pad_token_id + 1 ) __lowerCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) __lowerCAmelCase = self.get_config() __lowerCAmelCase = config.num_attention_heads __lowerCAmelCase = self.prepare_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, input_dict def UpperCAmelCase_ ( self ) -> List[str]: __lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs() return config, inputs_dict def UpperCAmelCase_ ( self ) -> Optional[Any]: return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def UpperCAmelCase_ ( self ) -> Optional[int]: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> str: __lowerCAmelCase = UMTaModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __lowerCAmelCase = model( input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase , attention_mask=UpperCamelCase , decoder_attention_mask=UpperCamelCase , ) __lowerCAmelCase = model(input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase ) __lowerCAmelCase = result.last_hidden_state __lowerCAmelCase = result.past_key_values __lowerCAmelCase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(UpperCamelCase ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> Tuple: __lowerCAmelCase = UMTaModel(config=UpperCamelCase ).get_decoder().to(UpperCamelCase ).eval() # first forward pass __lowerCAmelCase = model(UpperCamelCase , use_cache=UpperCamelCase ) __lowerCAmelCase = model(UpperCamelCase ) __lowerCAmelCase = model(UpperCamelCase , use_cache=UpperCamelCase ) self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) ) self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) + 1 ) __lowerCAmelCase , __lowerCAmelCase = 
outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and __lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCAmelCase = model(UpperCamelCase )["last_hidden_state"] __lowerCAmelCase = model(UpperCamelCase , past_key_values=UpperCamelCase )["last_hidden_state"] # select random slice __lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach() __lowerCAmelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 ) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , ) -> int: __lowerCAmelCase = UMTaModel(config=UpperCamelCase ).to(UpperCamelCase ).half().eval() __lowerCAmelCase = model(**UpperCamelCase )["last_hidden_state"] self.parent.assertFalse(torch.isnan(UpperCamelCase ).any().item() ) @require_torch class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): a : List[Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) a : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else () a : Any = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) a : Union[str, Any] = True a : Optional[int] = False a : Optional[int] = False a : Tuple = True a : Tuple = True # The small UMT5 model needs higher percentages for CPU/MP tests a : str = [0.8, 0.9] def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = UMTaModelTester(self ) @unittest.skip("Test has a segmentation fault on torch 1.8.0" ) def UpperCAmelCase_ ( self ) -> Tuple: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() __lowerCAmelCase = UMTaModel(config_and_inputs[0] ).to(UpperCamelCase ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=UpperCamelCase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , ) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision" ) def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Any: __lowerCAmelCase = ["encoder_attentions", "decoder_attentions", "cross_attentions"] __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() __lowerCAmelCase = config_and_inputs[0] __lowerCAmelCase = UMTaForConditionalGeneration(UpperCamelCase ).eval() model.to(UpperCamelCase ) __lowerCAmelCase = { "head_mask": torch.zeros(config.num_layers , config.num_heads , device=UpperCamelCase ), "decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCamelCase ), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCamelCase ), } for attn_name, (name, mask) in zip(UpperCamelCase , head_masking.items() ): __lowerCAmelCase = {name: mask} # 
Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __lowerCAmelCase = torch.ones( config.num_decoder_layers , config.num_heads , device=UpperCamelCase ) __lowerCAmelCase = model.generate( config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=UpperCamelCase , return_dict_in_generate=UpperCamelCase , **UpperCamelCase , ) # We check the state of decoder_attentions and cross_attentions just from the last step __lowerCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases." ) def UpperCAmelCase_ ( self ) -> Optional[int]: pass @require_torch @require_sentencepiece @require_tokenizers class UpperCAmelCase__ ( unittest.TestCase ): @slow @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" ) def UpperCAmelCase_ ( self ) -> Any: __lowerCAmelCase = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=UpperCamelCase ).to(UpperCamelCase ) __lowerCAmelCase = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=UpperCamelCase , legacy=UpperCamelCase ) __lowerCAmelCase = [ "Bonjour monsieur <extra_id_0> bien <extra_id_1>.", "No se como puedo <extra_id_0>.", "This is the reason why we <extra_id_0> them.", "The <extra_id_0> walks in <extra_id_1>, seats", "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.", ] __lowerCAmelCase = tokenizer(UpperCamelCase , return_tensors="pt" , padding=UpperCamelCase ).input_ids # fmt: off __lowerCAmelCase = torch.tensor( [ [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = model.generate(input_ids.to(UpperCamelCase ) ) __lowerCAmelCase = [ "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>", "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. 
This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] __lowerCAmelCase = tokenizer.batch_decode(UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase )
705
'''simple docstring'''
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
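For reference, the seven block products computed by actual_strassen above, with matrix_a split into blocks a, b, c, d and matrix_b into e, f, g, h:

\[
\begin{aligned}
t_1 &= a\,(f - h), & t_2 &= (a + b)\,h, & t_3 &= (c + d)\,e, & t_4 &= d\,(g - e),\\
t_5 &= (a + d)(e + h), & t_6 &= (b - d)(g + h), & t_7 &= (a - c)(e + f),
\end{aligned}
\]

recombined as

\[
C = \begin{pmatrix} t_5 + t_4 - t_2 + t_6 & t_1 + t_2 \\ t_3 + t_4 & t_1 + t_5 - t_3 - t_7 \end{pmatrix},
\]

i.e. seven recursive multiplications instead of the naive eight, giving \(T(n) = 7\,T(n/2) + O(n^2) = O(n^{\log_2 7}) \approx O(n^{2.807})\).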
39
0
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class UpperCAmelCase__ ( unittest.TestCase ): def __init__( self , UpperCamelCase , UpperCamelCase=7 , UpperCamelCase=3 , UpperCamelCase=18 , UpperCamelCase=30 , UpperCamelCase=400 , UpperCamelCase=True , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , UpperCamelCase=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , UpperCamelCase=True , ) -> List[Any]: __lowerCAmelCase = size if size is not None else {"height": 224, "width": 224} __lowerCAmelCase = crop_size if crop_size is not None else {"height": 18, "width": 18} __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = num_channels __lowerCAmelCase = image_size __lowerCAmelCase = min_resolution __lowerCAmelCase = max_resolution __lowerCAmelCase = do_resize __lowerCAmelCase = size __lowerCAmelCase = do_center_crop __lowerCAmelCase = crop_size __lowerCAmelCase = do_normalize __lowerCAmelCase = image_mean __lowerCAmelCase = image_std __lowerCAmelCase = do_convert_rgb def UpperCAmelCase_ ( self ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def UpperCAmelCase_ ( self , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=False ) -> Union[str, Any]: assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: __lowerCAmelCase = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: __lowerCAmelCase = [] for i in range(self.batch_size ): __lowerCAmelCase , __lowerCAmelCase = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __lowerCAmelCase = [Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) ) for x in image_inputs] if torchify: __lowerCAmelCase = [torch.from_numpy(UpperCamelCase ) for x in image_inputs] return image_inputs @require_torch @require_vision class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ): a : Any = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCAmelCase_ ( self ) -> Union[str, Any]: __lowerCAmelCase = ChineseCLIPImageProcessingTester(self , do_center_crop=UpperCamelCase ) @property def UpperCAmelCase_ ( self ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self ) -> Any: __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase , "do_resize" ) ) self.assertTrue(hasattr(UpperCamelCase , "size" ) ) self.assertTrue(hasattr(UpperCamelCase , "do_center_crop" ) ) self.assertTrue(hasattr(UpperCamelCase , "center_crop" ) ) 
self.assertTrue(hasattr(UpperCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(UpperCamelCase , "image_mean" ) ) self.assertTrue(hasattr(UpperCamelCase , "image_std" ) ) self.assertTrue(hasattr(UpperCamelCase , "do_convert_rgb" ) ) def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 224, "width": 224} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) __lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def UpperCAmelCase_ ( self ) -> Optional[int]: pass def UpperCAmelCase_ ( self ) -> Any: # Initialize image_processing __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , Image.Image ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __lowerCAmelCase = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def UpperCAmelCase_ ( self ) -> List[str]: # Initialize image_processing __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase , numpify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , np.ndarray ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __lowerCAmelCase = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def UpperCAmelCase_ ( self ) -> Optional[Any]: # Initialize image_processing __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase , torchify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , torch.Tensor ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __lowerCAmelCase = 
image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) @require_torch @require_vision class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ): a : int = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=UpperCamelCase ) __lowerCAmelCase = 3 @property def UpperCAmelCase_ ( self ) -> str: return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase , "do_resize" ) ) self.assertTrue(hasattr(UpperCamelCase , "size" ) ) self.assertTrue(hasattr(UpperCamelCase , "do_center_crop" ) ) self.assertTrue(hasattr(UpperCamelCase , "center_crop" ) ) self.assertTrue(hasattr(UpperCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(UpperCamelCase , "image_mean" ) ) self.assertTrue(hasattr(UpperCamelCase , "image_std" ) ) self.assertTrue(hasattr(UpperCamelCase , "do_convert_rgb" ) ) def UpperCAmelCase_ ( self ) -> Tuple: pass def UpperCAmelCase_ ( self ) -> str: # Initialize image_processing __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , Image.Image ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __lowerCAmelCase = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
706
'''simple docstring''' import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput lowerCAmelCase : Optional[Any] = '''scheduler_config.json''' class UpperCAmelCase__ ( UpperCamelCase__ ): a : str = 1 a : Optional[int] = 2 a : int = 3 a : Union[str, Any] = 4 a : int = 5 a : Optional[int] = 6 a : str = 7 a : List[Any] = 8 a : List[str] = 9 a : List[str] = 1_0 a : int = 1_1 a : Any = 1_2 a : Any = 1_3 a : Tuple = 1_4 @dataclass class UpperCAmelCase__ ( UpperCamelCase__ ): a : torch.FloatTensor class UpperCAmelCase__ : a : Tuple = SCHEDULER_CONFIG_NAME a : Union[str, Any] = [] a : str = True @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase=False , **UpperCamelCase , ) -> int: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = cls.load_config( pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , ) return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False , **UpperCamelCase ) -> Dict: self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase ) @property def UpperCAmelCase_ ( self ) -> str: return self._get_compatibles() @classmethod def UpperCAmelCase_ ( cls ) -> Tuple: __lowerCAmelCase = list(set([cls.__name__] + cls._compatibles ) ) __lowerCAmelCase = importlib.import_module(__name__.split("." )[0] ) __lowerCAmelCase = [ getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase ) ] return compatible_classes
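A usage sketch for the scheduler mixin above as diffusers exposes it; the model id and the "scheduler" subfolder layout are illustrative assumptions, not taken from this file:

from diffusers import DDPMScheduler

# from_pretrained() goes through load_config() + from_config() as defined above
scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256", subfolder="scheduler")
print(scheduler.compatibles)  # classes resolved from `_compatibles` via importlib in _get_compatibles()
scheduler.save_pretrained("./my_scheduler")  # writes scheduler_config.json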
39
0
import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def __lowerCAmelCase ( lowerCamelCase : Optional[int] ): '''simple docstring''' __lowerCAmelCase = checkpoints.load_tax_checkpoint(lowerCamelCase ) __lowerCAmelCase = flatten_dict(lowerCamelCase ) return flax_params def __lowerCAmelCase ( lowerCamelCase : int ): '''simple docstring''' __lowerCAmelCase = {} __lowerCAmelCase = { "token_embedder": "embeddings", "encoder_norm": "layernorm", "kernel": "weight", ".out": ".output", "scale": "weight", "embedders_0.pos_embedding": "row_embedder.weight", "embedders_1.pos_embedding": "column_embedder.weight", } __lowerCAmelCase = { "query": "attention.query", "key": "attention.key", "value": "attention.value", "output.dense": "output", "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o", "pre_self_attention_layer_norm": "self_attention.layer_norm", "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm", "mlp.": "mlp.DenseReluDense.", "pre_mlp_layer_norm": "mlp.layer_norm", "self_attention.o": "self_attention.attention.o", "decoder.embeddings.embedding": "decoder.embed_tokens.weight", "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight", "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight", "decoder.logits_dense.weight": "decoder.lm_head.weight", } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key __lowerCAmelCase = ".".join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): __lowerCAmelCase = new_key.replace(lowerCamelCase , lowerCamelCase ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): __lowerCAmelCase = new_key.replace(lowerCamelCase , lowerCamelCase ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number __lowerCAmelCase = re.sub(r"layers_(\d+)" , r"layer.\1" , lowerCamelCase ) __lowerCAmelCase = new_key.replace("encoder" , "encoder.encoder" ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number __lowerCAmelCase = re.sub(r"layers_(\d+)" , r"layer.\1" , lowerCamelCase ) __lowerCAmelCase = flax_dict[key] __lowerCAmelCase = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): __lowerCAmelCase = torch.from_numpy(converted_dict[key].T ) else: __lowerCAmelCase = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Union[str, Any]=False , lowerCamelCase : int=False ): '''simple docstring''' __lowerCAmelCase = get_flax_param(lowerCamelCase ) if not use_large: __lowerCAmelCase = PixaStructVisionConfig() __lowerCAmelCase = PixaStructTextConfig() else: __lowerCAmelCase = PixaStructVisionConfig( hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 ) __lowerCAmelCase = PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 ) __lowerCAmelCase = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=lowerCamelCase ) __lowerCAmelCase = PixaStructForConditionalGeneration(lowerCamelCase ) 
__lowerCAmelCase = rename_and_convert_flax_params(lowerCamelCase ) model.load_state_dict(lowerCamelCase ) __lowerCAmelCase = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" ) __lowerCAmelCase = PixaStructImageProcessor() __lowerCAmelCase = PixaStructProcessor(image_processor=lowerCamelCase , tokenizer=lowerCamelCase ) if use_large: __lowerCAmelCase = 40_96 __lowerCAmelCase = True # mkdir if needed os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase ) model.save_pretrained(lowerCamelCase ) processor.save_pretrained(lowerCamelCase ) print("Model saved in {}".format(lowerCamelCase ) ) if __name__ == "__main__": lowerCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''') parser.add_argument('''--is_vqa''', action='''store_true''', help='''Whether the model is a VQA model.''') lowerCAmelCase : Optional[Any] = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa )
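For context, here is a minimal sketch of how the converted checkpoint would be consumed afterwards. It assumes the standard transformers Pix2Struct API; the output directory and image URL are illustrative placeholders, not values from the script.

# Minimal usage sketch: load the directory that the conversion script writes
# via save_pretrained(). Directory name and image URL are illustrative.
import requests
from PIL import Image
from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor

model = Pix2StructForConditionalGeneration.from_pretrained("./pix2struct-converted")
processor = Pix2StructProcessor.from_pretrained("./pix2struct-converted")

image = Image.open(requests.get("https://example.com/chart.png", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
generated_ids = model.generate(**inputs, max_new_tokens=64)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])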
707
'''simple docstring''' import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger lowerCAmelCase : List[Any] = get_logger(__name__) class UpperCAmelCase__ : def __init__( self , UpperCamelCase = None ) -> Union[str, Any]: __lowerCAmelCase = ( os.path.join(UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) __lowerCAmelCase = Extractor def UpperCAmelCase_ ( self , UpperCamelCase ) -> str: from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path __lowerCAmelCase = os.path.abspath(UpperCamelCase ) return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase ) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> bool: return force_extract or ( not os.path.isfile(UpperCamelCase ) and not (os.path.isdir(UpperCamelCase ) and os.listdir(UpperCamelCase )) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False ) -> str: __lowerCAmelCase = self.extractor.infer_extractor_format(UpperCamelCase ) if not extractor_format: return input_path __lowerCAmelCase = self._get_output_path(UpperCamelCase ) if self._do_extract(UpperCamelCase , UpperCamelCase ): self.extractor.extract(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return output_path class UpperCAmelCase__ ( UpperCamelCase__ ): @classmethod @abstractmethod def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool: ... @staticmethod @abstractmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: ...
class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): a : List[bytes] = [] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> List[Any]: with open(UpperCamelCase , "rb" ) as f: return f.read(UpperCamelCase ) @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool: if not magic_number: __lowerCAmelCase = max(len(UpperCamelCase ) for cls_magic_number in cls.magic_numbers ) try: __lowerCAmelCase = cls.read_magic_number(UpperCamelCase , UpperCamelCase ) except OSError: return False return any(magic_number.startswith(UpperCamelCase ) for cls_magic_number in cls.magic_numbers ) class UpperCAmelCase__ ( UpperCamelCase__ ): @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool: return tarfile.is_tarfile(UpperCamelCase ) @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict: def resolved(UpperCamelCase ) -> str: return os.path.realpath(os.path.abspath(UpperCamelCase ) ) def badpath(UpperCamelCase , UpperCamelCase ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(UpperCamelCase , UpperCamelCase ) ).startswith(UpperCamelCase ) def badlink(UpperCamelCase , UpperCamelCase ) -> bool: # Links are interpreted relative to the directory containing the link __lowerCAmelCase = resolved(os.path.join(UpperCamelCase , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=UpperCamelCase ) __lowerCAmelCase = resolved(UpperCamelCase ) for finfo in members: if badpath(finfo.name , UpperCamelCase ): logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' ) elif finfo.issym() and badlink(UpperCamelCase , UpperCamelCase ): logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' ) elif finfo.islnk() and badlink(UpperCamelCase , UpperCamelCase ): logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' ) else: yield finfo @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) __lowerCAmelCase = tarfile.open(UpperCamelCase ) tar_file.extractall(UpperCamelCase , members=TarExtractor.safemembers(UpperCamelCase , UpperCamelCase ) ) tar_file.close() class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x1F\x8B"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: with gzip.open(UpperCamelCase , "rb" ) as gzip_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : List[Any] = [ B"""PK\x03\x04""", B"""PK\x05\x06""", # empty archive B"""PK\x07\x08""", # spanned archive ] @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool: if super().is_extractable(UpperCamelCase , magic_number=UpperCamelCase ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. 
# From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(UpperCamelCase , "rb" ) as fp: __lowerCAmelCase = _EndRecData(UpperCamelCase ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: __lowerCAmelCase = fp.read(UpperCamelCase ) # CD is where we expect it to be if len(UpperCamelCase ) == sizeCentralDir: __lowerCAmelCase = struct.unpack(UpperCamelCase , UpperCamelCase ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) with zipfile.ZipFile(UpperCamelCase , "r" ) as zip_file: zip_file.extractall(UpperCamelCase ) zip_file.close() class UpperCAmelCase__ ( UpperCamelCase__ ): a : Tuple = [B"""\xFD\x37\x7A\x58\x5A\x00"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: with lzma.open(UpperCamelCase ) as compressed_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : str = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.RARFILE_AVAILABLE: raise ImportError("Please pip install rarfile" ) import rarfile os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) __lowerCAmelCase = rarfile.RarFile(UpperCamelCase ) rf.extractall(UpperCamelCase ) rf.close() class UpperCAmelCase__ ( UpperCamelCase__ ): a : int = [B"""\x28\xb5\x2F\xFD"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.ZSTANDARD_AVAILABLE: raise ImportError("Please pip install zstandard" ) import zstandard as zstd __lowerCAmelCase = zstd.ZstdDecompressor() with open(UpperCamelCase , "rb" ) as ifh, open(UpperCamelCase , "wb" ) as ofh: dctx.copy_stream(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x42\x5A\x68"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: with bza.open(UpperCamelCase , "rb" ) as compressed_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x37\x7A\xBC\xAF\x27\x1C"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.PY7ZR_AVAILABLE: raise ImportError("Please pip install py7zr" ) import pyazr os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) with pyazr.SevenZipFile(UpperCamelCase , "r" ) as archive: archive.extractall(UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x04\x22\x4D\x18"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.LZ4_AVAILABLE: raise ImportError("Please pip 
install lz4" ) import lza.frame with lza.frame.open(UpperCamelCase , "rb" ) as compressed_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ : # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) a : Dict[str, Type[BaseExtractor]] = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def UpperCAmelCase_ ( cls ) -> Optional[Any]: return max( len(UpperCamelCase ) for extractor in cls.extractors.values() if issubclass(UpperCamelCase , UpperCamelCase ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict: try: return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase , magic_number_length=UpperCamelCase ) except OSError: return b"" @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = False ) -> bool: warnings.warn( "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. " "Use 'infer_extractor_format' instead." , category=UpperCamelCase , ) __lowerCAmelCase = cls.infer_extractor_format(UpperCamelCase ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase ) -> str: # <Added version="2.4.0"/> __lowerCAmelCase = cls._get_magic_number_max_length() __lowerCAmelCase = cls._read_magic_number(UpperCamelCase , UpperCamelCase ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(UpperCamelCase , magic_number=UpperCamelCase ): return extractor_format @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = "deprecated" , ) -> None: os.makedirs(os.path.dirname(UpperCamelCase ) , exist_ok=UpperCamelCase ) # Prevent parallel extractions __lowerCAmelCase = str(Path(UpperCamelCase ).with_suffix(".lock" ) ) with FileLock(UpperCamelCase ): shutil.rmtree(UpperCamelCase , ignore_errors=UpperCamelCase ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(UpperCamelCase , UpperCamelCase ): # passed as positional arg warnings.warn( "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. " "Use 'extractor_format' instead." , category=UpperCamelCase , ) __lowerCAmelCase = extractor if extractor != "deprecated" else extractor_format else: __lowerCAmelCase = cls.extractors[extractor_format] return extractor.extract(UpperCamelCase , UpperCamelCase ) else: warnings.warn( "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an " "exception in 3.0.0." , category=UpperCamelCase , ) for extractor in cls.extractors.values(): if extractor.is_extractable(UpperCamelCase ): return extractor.extract(UpperCamelCase , UpperCamelCase )
39
0
'''simple docstring''' from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter lowerCAmelCase : List[str] = logging.get_logger(__name__) lowerCAmelCase : Dict[Optional[str], Type[Formatter]] = {} lowerCAmelCase : Dict[Optional[str], str] = {} lowerCAmelCase : Dict[Optional[str], Exception] = {} def __lowerCAmelCase ( lowerCamelCase : type , lowerCamelCase : Optional[str] , lowerCamelCase : Optional[List[str]] = None , ): '''simple docstring''' __lowerCAmelCase = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' ) __lowerCAmelCase = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' ) __lowerCAmelCase = format_type def __lowerCAmelCase ( lowerCamelCase : Exception , lowerCamelCase : Optional[str] , lowerCamelCase : Optional[List[str]] = None ): '''simple docstring''' __lowerCAmelCase = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): __lowerCAmelCase = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['''python''']) _register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow''']) _register_formatter(NumpyFormatter, '''numpy''', aliases=['''np''']) _register_formatter(PandasFormatter, '''pandas''', aliases=['''pd''']) _register_formatter(CustomFormatter, '''custom''') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch''']) else: lowerCAmelCase : Optional[int] = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''') _register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch''']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf''']) else: lowerCAmelCase : str = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''') _register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf''']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, '''jax''', aliases=[]) else: lowerCAmelCase : Any = ValueError('''JAX needs to be installed to be able to return JAX arrays.''') _register_unavailable_formatter(_jax_error, '''jax''', aliases=[]) def __lowerCAmelCase ( lowerCamelCase : Optional[str] ): '''simple docstring''' if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def __lowerCAmelCase ( lowerCamelCase : Optional[str] , **lowerCamelCase : Tuple ): '''simple docstring''' __lowerCAmelCase = get_format_type_from_alias(lowerCamelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**lowerCamelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f'''Return type should be None or selected in {list(type for type in 
_FORMAT_TYPES.keys() if type is not None )}, but got \'{format_type}\'''' )
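As a quick illustration of how the registry above is used, here is a sketch against the un-mangled names from the datasets source (get_formatter and the alias tables), assuming NumPy is installed:

# Resolve formatter aliases through the registry defined above.
from datasets.formatting import get_formatter

numpy_formatter = get_formatter("np")   # alias "np" resolves to NumpyFormatter
python_formatter = get_formatter(None)  # None resolves to the default PythonFormatter
# get_formatter("pt") returns a TorchFormatter when torch is installed and
# raises the registered "PyTorch needs to be installed..." error otherwise.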
708
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class UpperCAmelCase__ ( UpperCamelCase__ ): def __init__( self ) -> List[str]: # test for the above condition self.test() def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = 0 __lowerCAmelCase = False while not completed: if counter == 1: self.reset() __lowerCAmelCase = self.advance() if not self.does_advance(UpperCamelCase ): raise Exception( "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.update(UpperCamelCase ) counter += 1 if counter > 1_0000: raise Exception("update() does not fulfill the constraint." ) if self.remaining() != 0: raise Exception("Custom Constraint is not defined correctly." ) @abstractmethod def UpperCAmelCase_ ( self ) -> Dict: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self ) -> int: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self ) -> int: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> str: raise NotImplementedError( F'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class UpperCAmelCase__ ( UpperCamelCase__ ): def __init__( self , UpperCamelCase ) -> Dict: super(UpperCamelCase , self ).__init__() if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(UpperCamelCase , UpperCamelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''`token_ids` has to be a list of positive integers, but is {token_ids}.''' ) __lowerCAmelCase = token_ids __lowerCAmelCase = len(self.token_ids ) __lowerCAmelCase = -1 # the index of the currently fulfilled step __lowerCAmelCase = False def UpperCAmelCase_ ( self ) -> Optional[int]: if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def UpperCAmelCase_ ( self , UpperCamelCase ) -> str: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def UpperCAmelCase_ ( self , UpperCamelCase ) -> int: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False if self.does_advance(UpperCamelCase ): self.fulfilled_idx += 1 __lowerCAmelCase = True if self.fulfilled_idx == (self.seqlen - 1): __lowerCAmelCase = True __lowerCAmelCase = completed else: # failed to make progress. __lowerCAmelCase = True self.reset() return stepped, completed, reset def UpperCAmelCase_ ( self ) -> Union[str, Any]: __lowerCAmelCase = False __lowerCAmelCase = 0 def UpperCAmelCase_ ( self ) -> Optional[int]: return self.seqlen - (self.fulfilled_idx + 1) def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Optional[Any]: __lowerCAmelCase = PhrasalConstraint(self.token_ids ) if stateful: __lowerCAmelCase = self.seqlen __lowerCAmelCase = self.fulfilled_idx __lowerCAmelCase = self.completed return new_constraint class UpperCAmelCase__ : def __init__( self , UpperCamelCase , UpperCamelCase=True ) -> Optional[int]: __lowerCAmelCase = max([len(UpperCamelCase ) for one in nested_token_ids] ) __lowerCAmelCase = {} for token_ids in nested_token_ids: __lowerCAmelCase = root for tidx, token_id in enumerate(UpperCamelCase ): if token_id not in level: __lowerCAmelCase = {} __lowerCAmelCase = level[token_id] if no_subsets and self.has_subsets(UpperCamelCase , UpperCamelCase ): raise ValueError( "Each list in `nested_token_ids` can't be a complete subset of another list, but is" F''' {nested_token_ids}.''' ) __lowerCAmelCase = root def UpperCAmelCase_ ( self , UpperCamelCase ) -> int: __lowerCAmelCase = self.trie for current_token in current_seq: __lowerCAmelCase = start[current_token] __lowerCAmelCase = list(start.keys() ) return next_tokens def UpperCAmelCase_ ( self , UpperCamelCase ) -> str: __lowerCAmelCase = self.next_tokens(UpperCamelCase ) return len(UpperCamelCase ) == 0 def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]: __lowerCAmelCase = list(root.values() ) if len(UpperCamelCase ) == 0: return 1 else: return sum([self.count_leaves(UpperCamelCase ) for nn in next_nodes] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> Optional[Any]: __lowerCAmelCase = self.count_leaves(UpperCamelCase ) return len(UpperCamelCase ) != leaf_count
class UpperCAmelCase__ ( UpperCamelCase__ ): def __init__( self , UpperCamelCase ) -> List[Any]: super(UpperCamelCase , self ).__init__() if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(UpperCamelCase , UpperCamelCase ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(UpperCamelCase , UpperCamelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) __lowerCAmelCase = DisjunctiveTrie(UpperCamelCase ) __lowerCAmelCase = nested_token_ids __lowerCAmelCase = self.trie.max_height __lowerCAmelCase = [] __lowerCAmelCase = False def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase = self.trie.next_tokens(self.current_seq ) if len(UpperCamelCase ) == 0: return None else: return token_list def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[str]: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' ) __lowerCAmelCase = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def UpperCAmelCase_ ( self , UpperCamelCase ) -> int: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False if self.does_advance(UpperCamelCase ): self.current_seq.append(UpperCamelCase ) __lowerCAmelCase = True else: __lowerCAmelCase = True self.reset() __lowerCAmelCase = self.trie.reached_leaf(self.current_seq ) __lowerCAmelCase = completed return stepped, completed, reset def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = False __lowerCAmelCase = [] def UpperCAmelCase_ ( self ) -> int: if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Union[str, Any]: __lowerCAmelCase = DisjunctiveConstraint(self.token_ids ) if stateful: __lowerCAmelCase = self.seqlen __lowerCAmelCase = self.current_seq __lowerCAmelCase = self.completed return new_constraint class UpperCAmelCase__ : def __init__( self , UpperCamelCase ) -> Union[str, Any]: __lowerCAmelCase = constraints # max # of steps required to fulfill a given constraint __lowerCAmelCase = max([c.seqlen for c in constraints] ) __lowerCAmelCase = len(UpperCamelCase ) __lowerCAmelCase = False self.init_state() def UpperCAmelCase_ ( self ) -> Union[str, Any]: __lowerCAmelCase = [] __lowerCAmelCase = None __lowerCAmelCase = [constraint.copy(stateful=UpperCamelCase ) for constraint in self.constraints] def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def UpperCAmelCase_ ( self ) -> List[str]: __lowerCAmelCase = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" __lowerCAmelCase = constraint.advance() if 
isinstance(UpperCamelCase , UpperCamelCase ): token_list.append(UpperCamelCase ) elif isinstance(UpperCamelCase , UpperCamelCase ): token_list.extend(UpperCamelCase ) else: __lowerCAmelCase = self.inprogress_constraint.advance() if isinstance(UpperCamelCase , UpperCamelCase ): token_list.append(UpperCamelCase ) elif isinstance(UpperCamelCase , UpperCamelCase ): token_list.extend(UpperCamelCase ) if len(UpperCamelCase ) == 0: return None else: return token_list def UpperCAmelCase_ ( self , UpperCamelCase ) -> int: self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint __lowerCAmelCase , __lowerCAmelCase = self.add(UpperCamelCase ) # the entire list of constraints is fulfilled if self.completed: break def UpperCAmelCase_ ( self , UpperCamelCase ) -> Dict: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) __lowerCAmelCase , __lowerCAmelCase = False, False if self.completed: __lowerCAmelCase = True __lowerCAmelCase = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the current # job, simply update the state __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.inprogress_constraint.update(UpperCamelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCamelCase ) ) __lowerCAmelCase = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) __lowerCAmelCase = None if len(self.pending_constraints ) == 0: # we're done! __lowerCAmelCase = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(UpperCamelCase ): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = pending_constraint.update(UpperCamelCase ) if not stepped: raise Exception( "`constraint.update(token_id)` is not yielding incremental progress, " "even though `constraint.does_advance(token_id)` is true." ) if complete: self.complete_constraints.append(UpperCamelCase ) __lowerCAmelCase = None if not complete and stepped: __lowerCAmelCase = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". __lowerCAmelCase = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. __lowerCAmelCase = True break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped def UpperCAmelCase_ ( self , UpperCamelCase=True ) -> str: __lowerCAmelCase = ConstraintListState(self.constraints ) # we never touch the self.constraints objects # throughout this process, so they remain in their initialization state. if stateful: __lowerCAmelCase = [ constraint.copy(stateful=UpperCamelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: __lowerCAmelCase = self.inprogress_constraint.copy(stateful=UpperCamelCase ) __lowerCAmelCase = [constraint.copy() for constraint in self.pending_constraints] return new_state
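These constraint classes back constrained beam search in transformers' generate; a minimal sketch under that assumption (model name and forced phrase are illustrative):

# Force a phrase into the generated text via PhrasalConstraint.
# Constrained beam search requires num_beams > 1 and no sampling.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

constraint = PhrasalConstraint(tokenizer("schönen Tag", add_special_tokens=False).input_ids)
inputs = tokenizer("translate English to German: Have a nice day.", return_tensors="pt")
outputs = model.generate(**inputs, constraints=[constraint], num_beams=5, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))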
39
0
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCAmelCase__ : def __init__( self , UpperCamelCase , UpperCamelCase=3 , UpperCamelCase=32 , UpperCamelCase=3 , UpperCamelCase=10 , UpperCamelCase=[8, 16, 32, 64] , UpperCamelCase=[1, 1, 2, 1] , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase="relu" , UpperCamelCase=3 , UpperCamelCase=None , UpperCamelCase=["stage2", "stage3", "stage4"] , UpperCamelCase=[2, 3, 4] , UpperCamelCase=1 , ) -> List[Any]: __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = image_size __lowerCAmelCase = num_channels __lowerCAmelCase = embeddings_size __lowerCAmelCase = hidden_sizes __lowerCAmelCase = depths __lowerCAmelCase = is_training __lowerCAmelCase = use_labels __lowerCAmelCase = hidden_act __lowerCAmelCase = num_labels __lowerCAmelCase = scope __lowerCAmelCase = len(UpperCamelCase ) __lowerCAmelCase = out_features __lowerCAmelCase = out_indices __lowerCAmelCase = num_groups def UpperCAmelCase_ ( self ) -> List[str]: __lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase = None if self.use_labels: __lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) __lowerCAmelCase = self.get_config() return config, pixel_values, labels def UpperCAmelCase_ ( self ) -> Dict: return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: __lowerCAmelCase = BitModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __lowerCAmelCase = model(UpperCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]: __lowerCAmelCase = self.num_labels __lowerCAmelCase = BitForImageClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __lowerCAmelCase = model(UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: __lowerCAmelCase = BitBackbone(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __lowerCAmelCase = model(UpperCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, 
self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __lowerCAmelCase = None __lowerCAmelCase = BitBackbone(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __lowerCAmelCase = model(UpperCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs __lowerCAmelCase = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): a : Any = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () a : int = ( {"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification} if is_torch_available() else {} ) a : Dict = False a : Optional[Any] = False a : str = False a : List[Any] = False a : str = False def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = BitModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Any: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase_ ( self ) -> Dict: return @unittest.skip(reason="Bit does not output attentions" ) def UpperCAmelCase_ ( self ) -> int: pass @unittest.skip(reason="Bit does not use inputs_embeds" ) def UpperCAmelCase_ ( self ) -> int: pass @unittest.skip(reason="Bit does not support input and output embeddings" ) def UpperCAmelCase_ ( self ) -> List[Any]: pass def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(UpperCamelCase ) __lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase = [*signature.parameters.keys()] __lowerCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Any: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCamelCase ) def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(config=UpperCamelCase ) for name, module in model.named_modules(): if 
isinstance(UpperCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def UpperCAmelCase_ ( self ) -> int: def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ): __lowerCAmelCase = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): __lowerCAmelCase = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) __lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowerCAmelCase = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = ["preactivation", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: __lowerCAmelCase = layer_type __lowerCAmelCase = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCAmelCase = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) @unittest.skip(reason="Bit does not use feedforward chunking" ) def UpperCAmelCase_ ( self ) -> List[Any]: pass def UpperCAmelCase_ ( self ) -> List[str]: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase ) @slow def UpperCAmelCase_ ( self ) -> Tuple: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase = BitModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class UpperCAmelCase__ ( unittest.TestCase ): @cached_property def UpperCAmelCase_ ( self ) -> str: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: __lowerCAmelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCamelCase ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase ) # forward pass with torch.no_grad(): __lowerCAmelCase = model(**UpperCamelCase ) # verify the logits __lowerCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) __lowerCAmelCase = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1E-4 ) ) @require_torch class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ): a : Any = (BitBackbone,) if is_torch_available() else () a : str = BitConfig a : Optional[Any] = False def UpperCAmelCase_ ( self ) -> str: __lowerCAmelCase = BitModelTester(self )
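A condensed inference sketch mirroring the integration test above. The checkpoint name is an assumption (the first entry of BIT_PRETRAINED_MODEL_ARCHIVE_LIST, presumed to be google/bit-50), and the image path reuses the test fixture.

# Single-image classification with BiT, mirroring the slow test above.
import torch
from PIL import Image
from transformers import BitForImageClassification, BitImageProcessor

processor = BitImageProcessor.from_pretrained("google/bit-50")  # assumed checkpoint
model = BitForImageClassification.from_pretrained("google/bit-50")
model.eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[logits.argmax(-1).item()])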
709
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ): a : List[Any] = KandinskyImgaImgPipeline a : Union[str, Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""] a : List[Any] = [ """prompt""", """negative_prompt""", """image_embeds""", """negative_image_embeds""", """image""", ] a : Any = [ """generator""", """height""", """width""", """strength""", """guidance_scale""", """negative_prompt""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] a : Union[str, Any] = False @property def UpperCAmelCase_ ( self ) -> int: return 32 @property def UpperCAmelCase_ ( self ) -> List[str]: return 32 @property def UpperCAmelCase_ ( self ) -> Dict: return self.time_input_dim @property def UpperCAmelCase_ ( self ) -> int: return self.time_input_dim * 4 @property def UpperCAmelCase_ ( self ) -> int: return 100 @property def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def UpperCAmelCase_ ( self ) -> Union[str, Any]: torch.manual_seed(0 ) __lowerCAmelCase = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) __lowerCAmelCase = MultilingualCLIP(UpperCamelCase ) __lowerCAmelCase = text_encoder.eval() return text_encoder @property def UpperCAmelCase_ ( self ) -> List[str]: torch.manual_seed(0 ) __lowerCAmelCase = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } __lowerCAmelCase = UNetaDConditionModel(**UpperCamelCase ) return model @property def UpperCAmelCase_ ( self ) -> List[Any]: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def UpperCAmelCase_ ( self ) -> Dict: torch.manual_seed(0 ) 
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def UpperCAmelCase_ ( self ) -> Any: __lowerCAmelCase = self.dummy_text_encoder __lowerCAmelCase = self.dummy_tokenizer __lowerCAmelCase = self.dummy_unet __lowerCAmelCase = self.dummy_movq __lowerCAmelCase = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_00_85, "beta_end": 0.0_12, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } __lowerCAmelCase = DDIMScheduler(**UpperCamelCase ) __lowerCAmelCase = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=0 ) -> Optional[Any]: __lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) __lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase ) # create init_image __lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) __lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCAmelCase = Image.fromarray(np.uinta(UpperCamelCase ) ).convert("RGB" ).resize((256, 256) ) if str(UpperCamelCase ).startswith("mps" ): __lowerCAmelCase = torch.manual_seed(UpperCamelCase ) else: __lowerCAmelCase = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) __lowerCAmelCase = { "prompt": "horse", "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def UpperCAmelCase_ ( self ) -> Tuple: __lowerCAmelCase = "cpu" __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = self.pipeline_class(**UpperCamelCase ) __lowerCAmelCase = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) __lowerCAmelCase = pipe(**self.get_dummy_inputs(UpperCamelCase ) ) __lowerCAmelCase = output.images __lowerCAmelCase = pipe( **self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0] __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = np.array( [0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class UpperCAmelCase__ ( unittest.TestCase ): def UpperCAmelCase_ ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_img2img_frog.npy" ) __lowerCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) __lowerCAmelCase = "A red cartoon frog, 4k" __lowerCAmelCase = KandinskyPriorPipeline.from_pretrained( 
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(UpperCamelCase ) __lowerCAmelCase = KandinskyImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa ) __lowerCAmelCase = pipeline.to(UpperCamelCase ) pipeline.set_progress_bar_config(disable=UpperCamelCase ) __lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowerCAmelCase , __lowerCAmelCase = pipe_prior( UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() __lowerCAmelCase = pipeline( UpperCamelCase , image=UpperCamelCase , image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , ) __lowerCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
39
0
'''simple docstring''' import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP lowerCAmelCase : str = False try: lowerCAmelCase : int = _is_package_available('''google.colab''') except ModuleNotFoundError: pass @input.register class UpperCAmelCase__ : def __init__( self , UpperCamelCase = None , UpperCamelCase = [] ) -> Optional[int]: __lowerCAmelCase = 0 __lowerCAmelCase = choices __lowerCAmelCase = prompt if sys.platform == "win32": __lowerCAmelCase = "*" else: __lowerCAmelCase = "➔ " def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = "" ) -> Union[str, Any]: if sys.platform != "win32": writeColor(self.choices[index] , 32 , UpperCamelCase ) else: forceWrite(self.choices[index] , UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any: if index == self.position: forceWrite(F''' {self.arrow_char} ''' ) self.write_choice(UpperCamelCase ) else: forceWrite(F''' {self.choices[index]}''' ) reset_cursor() def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = 1 ) -> Union[str, Any]: __lowerCAmelCase = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(UpperCamelCase ) move_cursor(UpperCamelCase , direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP["up"] ) def UpperCAmelCase_ ( self ) -> Tuple: self.move_direction(Direction.UP ) @input.mark(KEYMAP["down"] ) def UpperCAmelCase_ ( self ) -> Tuple: self.move_direction(Direction.DOWN ) @input.mark(KEYMAP["newline"] ) def UpperCAmelCase_ ( self ) -> Any: move_cursor(len(self.choices ) - self.position , "DOWN" ) return self.position @input.mark(KEYMAP["interrupt"] ) def UpperCAmelCase_ ( self ) -> Optional[Any]: move_cursor(len(self.choices ) - self.position , "DOWN" ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(UpperCamelCase )] for number in range(10 )] ) def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = int(chr(self.current_selection ) ) __lowerCAmelCase = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP , -movement ) elif self.position < index: self.move_direction(Direction.DOWN , UpperCamelCase ) else: return else: return def UpperCAmelCase_ ( self , UpperCamelCase = 0 ) -> str: if self.prompt: linebreak() forceWrite(self.prompt , "\n" ) if in_colab: forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" ) else: forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" ) __lowerCAmelCase = default_choice for i in range(len(self.choices ) ): self.print_choice(UpperCamelCase ) forceWrite("\n" ) move_cursor(len(self.choices ) - self.position , "UP" ) with cursor.hide(): while True: if in_colab: try: __lowerCAmelCase = int(builtins.input() ) except ValueError: __lowerCAmelCase = default_choice else: __lowerCAmelCase = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1 , "UP" ) clear_line() self.write_choice(UpperCamelCase , "\n" ) return choice
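A hypothetical driver for the menu class above (it is named BulletMenu in the un-mangled accelerate source; the prompt and choices below are illustrative). run() renders the choices, moves the highlighted entry on arrow or number keys, and returns the index confirmed with enter.

# Assumed usage, with BulletMenu standing in for the class defined above.
menu = BulletMenu(
    "Which compute environment are you running?",
    ["This machine", "AWS (Amazon SageMaker)"],
)
choice_index = menu.run(default_choice=0)
print(f"Selected option {choice_index}")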
710
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') lowerCAmelCase : Any = logging.getLogger(__name__) @dataclass class UpperCAmelCase__ : a : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a : bool = field( default=UpperCamelCase__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) a : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) a : bool = field( default=UpperCamelCase__ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) @dataclass class UpperCAmelCase__ : a : Optional[str] = field(default=UpperCamelCase__ , metadata={"""help""": """The input training data file (a text file)."""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) a : bool = field( default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) a : Optional[int] = field( default=UpperCamelCase__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) a : Optional[int] = field( default=UpperCamelCase__ , metadata={ """help""": ( """The maximum total input sequence length after tokenization. If passed, sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a : bool = field( default=UpperCamelCase__ , metadata={ """help""": ( """Whether to pad all samples to the maximum sentence length. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch. 
More """ """efficient on GPU but very bad for TPU.""" ) } , ) a : Optional[int] = field( default=UpperCamelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) a : Optional[int] = field( default=UpperCamelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def UpperCAmelCase_ ( self ) -> Tuple: if self.train_file is not None: __lowerCAmelCase = self.train_file.split("." )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: __lowerCAmelCase = self.validation_file.split("." )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class UpperCAmelCase__ : a : PreTrainedTokenizerBase a : Union[bool, str, PaddingStrategy] = True a : Optional[int] = None a : Optional[int] = None def __call__( self , UpperCamelCase ) -> Optional[int]: __lowerCAmelCase = "label" if "label" in features[0].keys() else "labels" __lowerCAmelCase = [feature.pop(UpperCamelCase ) for feature in features] __lowerCAmelCase = len(UpperCamelCase ) __lowerCAmelCase = len(features[0]["input_ids"] ) __lowerCAmelCase = [ [{k: v[i] for k, v in feature.items()} for i in range(UpperCamelCase )] for feature in features ] __lowerCAmelCase = list(chain(*UpperCamelCase ) ) __lowerCAmelCase = self.tokenizer.pad( UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten __lowerCAmelCase = {k: v.view(UpperCamelCase , UpperCamelCase , -1 ) for k, v in batch.items()} # Add back labels __lowerCAmelCase = torch.tensor(UpperCamelCase , dtype=torch.intaa ) return batch def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag" , lowerCamelCase , lowerCamelCase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() __lowerCAmelCase = training_args.get_process_log_level() logger.setLevel(lowerCamelCase ) datasets.utils.logging.set_verbosity(lowerCamelCase ) transformers.utils.logging.set_verbosity(lowerCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __lowerCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: __lowerCAmelCase = {} if data_args.train_file is not None: __lowerCAmelCase = data_args.train_file if data_args.validation_file is not None: __lowerCAmelCase = data_args.validation_file __lowerCAmelCase = data_args.train_file.split("." )[-1] __lowerCAmelCase = load_dataset( lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. __lowerCAmelCase = load_dataset( "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCAmelCase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. __lowerCAmelCase = [f'''ending{i}''' for i in range(4 )] __lowerCAmelCase = "sent1" __lowerCAmelCase = "sent2" if data_args.max_seq_length is None: __lowerCAmelCase = tokenizer.model_max_length if max_seq_length > 10_24: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) __lowerCAmelCase = 10_24 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) __lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(lowerCamelCase : Tuple ): __lowerCAmelCase = [[context] * 4 for context in examples[context_name]] __lowerCAmelCase = examples[question_header_name] __lowerCAmelCase = [ [f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase ) ] # Flatten out __lowerCAmelCase = list(chain(*lowerCamelCase ) ) __lowerCAmelCase = list(chain(*lowerCamelCase ) ) # Tokenize __lowerCAmelCase = tokenizer( lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="max_length" if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) __lowerCAmelCase = raw_datasets["train"] if data_args.max_train_samples is not None: __lowerCAmelCase = min(len(lowerCamelCase ) , data_args.max_train_samples ) __lowerCAmelCase = train_dataset.select(range(lowerCamelCase ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): __lowerCAmelCase = train_dataset.map( lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) __lowerCAmelCase = raw_datasets["validation"] if data_args.max_eval_samples is not None: __lowerCAmelCase = min(len(lowerCamelCase ) , data_args.max_eval_samples ) __lowerCAmelCase = eval_dataset.select(range(lowerCamelCase ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): __lowerCAmelCase = eval_dataset.map( lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator __lowerCAmelCase = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(lowerCamelCase : Dict ): __lowerCAmelCase , __lowerCAmelCase = eval_predictions __lowerCAmelCase = np.argmax(lowerCamelCase , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer __lowerCAmelCase = Trainer( model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , ) # Training if training_args.do_train: __lowerCAmelCase = None if training_args.resume_from_checkpoint is not None: __lowerCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: __lowerCAmelCase = last_checkpoint __lowerCAmelCase = trainer.train(resume_from_checkpoint=lowerCamelCase ) trainer.save_model() # Saves the tokenizer too for easy upload __lowerCAmelCase = train_result.metrics __lowerCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase ) ) __lowerCAmelCase = min(lowerCamelCase , len(lowerCamelCase ) ) trainer.log_metrics("train" , lowerCamelCase ) trainer.save_metrics("train" , lowerCamelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) __lowerCAmelCase = 
trainer.evaluate() __lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase ) __lowerCAmelCase = min(lowerCamelCase , len(lowerCamelCase ) ) trainer.log_metrics("eval" , lowerCamelCase ) trainer.save_metrics("eval" , lowerCamelCase ) __lowerCAmelCase = { "finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice", "dataset_tags": "swag", "dataset_args": "regular", "dataset": "SWAG", "language": "en", } if training_args.push_to_hub: trainer.push_to_hub(**lowerCamelCase ) else: trainer.create_model_card(**lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : Tuple ): '''simple docstring''' main() if __name__ == "__main__": main()
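
# A minimal, self-contained sketch of the flatten/pad/un-flatten pattern used by the
# multiple-choice data collator above. All ids, lengths, and labels here are made up;
# only the reshaping logic mirrors the script.
import torch

features = [
    {"input_ids": [[1, 2], [3, 4], [5, 6], [7, 8]], "label": 2},  # one example, 4 endings
    {"input_ids": [[9, 10], [11, 12], [13, 14], [15, 16]], "label": 0},
]
labels = [feature.pop("label") for feature in features]
batch_size = len(features)
num_choices = len(features[0]["input_ids"])

# Flatten to one row per (example, choice) pair so padding happens over a single batch.
flattened = [{"input_ids": feature["input_ids"][i]} for feature in features for i in range(num_choices)]
input_ids = torch.tensor([row["input_ids"] for row in flattened])  # (batch * choices, seq_len)

# Un-flatten back to (batch, choices, seq_len) and re-attach the labels.
batch = {"input_ids": input_ids.view(batch_size, num_choices, -1)}
batch["labels"] = torch.tensor(labels, dtype=torch.int64)
assert batch["input_ids"].shape == (2, 4, 2) and batch["labels"].tolist() == [2, 0]
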
'''simple docstring''' from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def __lowerCAmelCase ( lowerCamelCase : Dict , lowerCamelCase : Tuple ): '''simple docstring''' __lowerCAmelCase = [] for part_id in partition_order: __lowerCAmelCase = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(lowerCamelCase ): expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() __lowerCAmelCase = spark.range(1_00 ).repartition(1 ) __lowerCAmelCase = Spark(lowerCamelCase ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() __lowerCAmelCase = spark.range(10 ).repartition(2 ) __lowerCAmelCase = [1, 0] __lowerCAmelCase = _generate_iterable_examples(lowerCamelCase , lowerCamelCase ) # Reverse the partitions. __lowerCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase , lowerCamelCase ) for i, (row_id, row_dict) in enumerate(generate_fn() ): __lowerCAmelCase , __lowerCAmelCase = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() __lowerCAmelCase = spark.range(10 ).repartition(1 ) __lowerCAmelCase = SparkExamplesIterable(lowerCamelCase ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(lowerCamelCase ): assert row_id == f'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() __lowerCAmelCase = spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("numpy.random.Generator" ) as generator_mock: __lowerCAmelCase = lambda lowerCamelCase : x.reverse() __lowerCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase , [2, 1, 0] ) __lowerCAmelCase = SparkExamplesIterable(lowerCamelCase ).shuffle_data_sources(lowerCamelCase ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(lowerCamelCase ): __lowerCAmelCase , __lowerCAmelCase = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() __lowerCAmelCase = spark.range(20 ).repartition(4 ) # Partitions 0 and 2 __lowerCAmelCase = SparkExamplesIterable(lowerCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 __lowerCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase , [0, 2] ) for i, (row_id, row_dict) in enumerate(lowerCamelCase ): __lowerCAmelCase , __lowerCAmelCase = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 __lowerCAmelCase = SparkExamplesIterable(lowerCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 __lowerCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase , [1, 3] ) for i, (row_id, row_dict) in enumerate(lowerCamelCase ): __lowerCAmelCase , __lowerCAmelCase = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() __lowerCAmelCase = spark.range(1_00 ).repartition(1 ) __lowerCAmelCase = Spark(lowerCamelCase ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 1_00
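
# A small sketch of the round-robin shard assignment the sharding tests above expect:
# with 4 partitions and 2 workers, worker 0 reads partitions [0, 2] and worker 1 reads
# [1, 3]. The helper name is hypothetical; only the slicing rule is the point.
def shard_partitions(partition_order: list, worker_id: int, num_workers: int) -> list:
    return partition_order[worker_id::num_workers]

assert shard_partitions([0, 1, 2, 3], worker_id=0, num_workers=2) == [0, 2]
assert shard_partitions([0, 1, 2, 3], worker_id=1, num_workers=2) == [1, 3]
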
'''simple docstring'''
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from"
    " diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated."
    " Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __lowerCAmelCase ( lowerCamelCase : Any ): '''simple docstring''' __lowerCAmelCase = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2] __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: __lowerCAmelCase = [3, 3, 3, 3] __lowerCAmelCase = [5, 5, 5, 5] elif "fl4" in model_name: __lowerCAmelCase = [4, 4, 4, 4] __lowerCAmelCase = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: __lowerCAmelCase = [3, 3, 3, 3] if "lrf" in model_name: __lowerCAmelCase = [3, 3, 3, 3] else: __lowerCAmelCase = [2, 2, 2, 2] if "tiny" in model_name: __lowerCAmelCase = 96 elif "small" in model_name: __lowerCAmelCase = 96 elif "base" in model_name: __lowerCAmelCase = 1_28 elif "large" in model_name: __lowerCAmelCase = 1_92 elif "xlarge" in model_name: __lowerCAmelCase = 2_56 elif "huge" in model_name: __lowerCAmelCase = 3_52 # set label information __lowerCAmelCase = "huggingface/label-files" if "large" in model_name or "huge" in model_name: __lowerCAmelCase = "imagenet-22k-id2label.json" else: __lowerCAmelCase = "imagenet-1k-id2label.json" __lowerCAmelCase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="dataset" ) , "r" ) ) __lowerCAmelCase = {int(lowerCamelCase ): v for k, v in idalabel.items()} __lowerCAmelCase = {v: k for k, v in idalabel.items()} __lowerCAmelCase = FocalNetConfig( embed_dim=lowerCamelCase , depths=lowerCamelCase , focal_levels=lowerCamelCase , focal_windows=lowerCamelCase , use_conv_embed=lowerCamelCase , idalabel=lowerCamelCase , labelaid=lowerCamelCase , use_post_layernorm=lowerCamelCase , use_layerscale=lowerCamelCase , ) return config def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ): '''simple docstring''' if "patch_embed.proj" in name: __lowerCAmelCase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: __lowerCAmelCase = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: __lowerCAmelCase = "encoder." 
+ name if "encoder.layers" in name: __lowerCAmelCase = name.replace("encoder.layers" , "encoder.stages" ) if "downsample.proj" in name: __lowerCAmelCase = name.replace("downsample.proj" , "downsample.projection" ) if "blocks" in name: __lowerCAmelCase = name.replace("blocks" , "layers" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: __lowerCAmelCase = name.replace("modulation.f" , "modulation.projection_in" ) if "modulation.h.weight" in name or "modulation.h.bias" in name: __lowerCAmelCase = name.replace("modulation.h" , "modulation.projection_context" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: __lowerCAmelCase = name.replace("modulation.proj" , "modulation.projection_out" ) if name == "norm.weight": __lowerCAmelCase = "layernorm.weight" if name == "norm.bias": __lowerCAmelCase = "layernorm.bias" if "head" in name: __lowerCAmelCase = name.replace("head" , "classifier" ) else: __lowerCAmelCase = "focalnet." + name return name def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Union[str, Any]=False ): '''simple docstring''' __lowerCAmelCase = { "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth", "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth", "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth", "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth", "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth", "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth", "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth", "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth", "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth", "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth", } # fmt: on __lowerCAmelCase = model_name_to_url[model_name] print("Checkpoint URL: " , lowerCamelCase ) __lowerCAmelCase = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location="cpu" )["model"] # rename keys for key in state_dict.copy().keys(): __lowerCAmelCase = state_dict.pop(lowerCamelCase ) __lowerCAmelCase = val __lowerCAmelCase = get_focalnet_config(lowerCamelCase ) __lowerCAmelCase = FocalNetForImageClassification(lowerCamelCase ) model.eval() # load state dict model.load_state_dict(lowerCamelCase ) # verify conversion __lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowerCAmelCase = BitImageProcessor( do_resize=lowerCamelCase , size={"shortest_edge": 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase , crop_size=2_24 , do_normalize=lowerCamelCase , image_mean=lowerCamelCase , image_std=lowerCamelCase , ) __lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ) __lowerCAmelCase = processor(images=lowerCamelCase , return_tensors="pt" ) __lowerCAmelCase = transforms.Compose( [ transforms.Resize(2_56 ), transforms.CenterCrop(2_24 ), 
transforms.ToTensor(), transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) __lowerCAmelCase = image_transforms(lowerCamelCase ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , lowerCamelCase , atol=1e-4 ) __lowerCAmelCase = model(**lowerCamelCase ) __lowerCAmelCase = outputs.logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) print("First values of logits:" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": __lowerCAmelCase = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ) elif model_name == "focalnet-tiny-lrf": __lowerCAmelCase = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] ) elif model_name == "focalnet-small": __lowerCAmelCase = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] ) elif model_name == "focalnet-small-lrf": __lowerCAmelCase = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] ) elif model_name == "focalnet-base": __lowerCAmelCase = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] ) elif model_name == "focalnet-base-lrf": __lowerCAmelCase = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] ) assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase ) processor.save_pretrained(lowerCamelCase ) if push_to_hub: print(f'''Pushing model and processor of {model_name} to the hub...''' ) model.push_to_hub(f'''{model_name}''' ) processor.push_to_hub(f'''{model_name}''' ) if __name__ == "__main__": lowerCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''focalnet-tiny''', type=str, help='''Name of the FocalNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub.''', ) lowerCAmelCase : Optional[int] = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
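
# A minimal sketch of the checkpoint-conversion pattern used above: pop every key out
# of the original state dict, map it to the new naming scheme, and reinsert the value.
# The rename rules here are a toy subset, not the full FocalNet mapping, and the
# integer values stand in for real tensors.
def rename_key(name: str) -> str:
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    return name

state_dict = {"patch_embed.proj.weight": 0, "stages.0.blocks.0.norm1.weight": 1}
for key in list(state_dict.keys()):
    state_dict[rename_key(key)] = state_dict.pop(key)

assert "embeddings.patch_embeddings.projection.weight" in state_dict
assert "stages.0.layers.0.norm1.weight" in state_dict
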
'''simple docstring''' from typing import List, Optional, Union import torch from transformers import ( XLMRobertaTokenizer, ) from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) from .text_encoder import MultilingualCLIP lowerCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name lowerCAmelCase : List[Any] = ''' Examples: ```py >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline >>> import torch >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior") >>> pipe_prior.to("cuda") >>> prompt = "red cat, 4k photo" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> negative_image_emb = out.negative_image_embeds >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1") >>> pipe.to("cuda") >>> image = pipe( ... prompt, ... image_embeds=image_emb, ... negative_image_embeds=negative_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... ).images >>> image[0].save("cat.png") ``` ''' def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Any=8 ): '''simple docstring''' __lowerCAmelCase = h // scale_factor**2 if h % scale_factor**2 != 0: new_h += 1 __lowerCAmelCase = w // scale_factor**2 if w % scale_factor**2 != 0: new_w += 1 return new_h * scale_factor, new_w * scale_factor class UpperCAmelCase__ ( UpperCamelCase__ ): def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> Any: super().__init__() self.register_modules( text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , unet=UpperCamelCase , scheduler=UpperCamelCase , movq=UpperCamelCase , ) __lowerCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]: if latents is None: __lowerCAmelCase = randn_tensor(UpperCamelCase , generator=UpperCamelCase , device=UpperCamelCase , dtype=UpperCamelCase ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) __lowerCAmelCase = latents.to(UpperCamelCase ) __lowerCAmelCase = latents * scheduler.init_noise_sigma return latents def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , ) -> Any: __lowerCAmelCase = len(UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else 1 # get prompt text embeddings __lowerCAmelCase = self.tokenizer( UpperCamelCase , padding="max_length" , truncation=UpperCamelCase , max_length=77 , return_attention_mask=UpperCamelCase , add_special_tokens=UpperCamelCase , return_tensors="pt" , ) __lowerCAmelCase = text_inputs.input_ids __lowerCAmelCase = self.tokenizer(UpperCamelCase , padding="longest" , return_tensors="pt" ).input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(UpperCamelCase , UpperCamelCase ): __lowerCAmelCase = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" F''' 
{self.tokenizer.model_max_length} tokens: {removed_text}''' ) __lowerCAmelCase = text_input_ids.to(UpperCamelCase ) __lowerCAmelCase = text_inputs.attention_mask.to(UpperCamelCase ) __lowerCAmelCase , __lowerCAmelCase = self.text_encoder( input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) __lowerCAmelCase = prompt_embeds.repeat_interleave(UpperCamelCase , dim=0 ) __lowerCAmelCase = text_encoder_hidden_states.repeat_interleave(UpperCamelCase , dim=0 ) __lowerCAmelCase = text_mask.repeat_interleave(UpperCamelCase , dim=0 ) if do_classifier_free_guidance: __lowerCAmelCase = 42 if negative_prompt is None: __lowerCAmelCase = [""] * batch_size elif type(UpperCamelCase ) is not type(UpperCamelCase ): raise TypeError( F'''`negative_prompt` should be the same type to `prompt`, but got {type(UpperCamelCase )} !=''' F''' {type(UpperCamelCase )}.''' ) elif isinstance(UpperCamelCase , UpperCamelCase ): __lowerCAmelCase = [negative_prompt] elif batch_size != len(UpperCamelCase ): raise ValueError( F'''`negative_prompt`: {negative_prompt} has batch size {len(UpperCamelCase )}, but `prompt`:''' F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' " the batch size of `prompt`." ) else: __lowerCAmelCase = negative_prompt __lowerCAmelCase = self.tokenizer( UpperCamelCase , padding="max_length" , max_length=77 , truncation=UpperCamelCase , return_attention_mask=UpperCamelCase , add_special_tokens=UpperCamelCase , return_tensors="pt" , ) __lowerCAmelCase = uncond_input.input_ids.to(UpperCamelCase ) __lowerCAmelCase = uncond_input.attention_mask.to(UpperCamelCase ) __lowerCAmelCase , __lowerCAmelCase = self.text_encoder( input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __lowerCAmelCase = negative_prompt_embeds.shape[1] __lowerCAmelCase = negative_prompt_embeds.repeat(1 , UpperCamelCase ) __lowerCAmelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase ) __lowerCAmelCase = uncond_text_encoder_hidden_states.shape[1] __lowerCAmelCase = uncond_text_encoder_hidden_states.repeat(1 , UpperCamelCase , 1 ) __lowerCAmelCase = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt , UpperCamelCase , -1 ) __lowerCAmelCase = uncond_text_mask.repeat_interleave(UpperCamelCase , dim=0 ) # done duplicates # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __lowerCAmelCase = torch.cat([negative_prompt_embeds, prompt_embeds] ) __lowerCAmelCase = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] ) __lowerCAmelCase = torch.cat([uncond_text_mask, text_mask] ) return prompt_embeds, text_encoder_hidden_states, text_mask def UpperCAmelCase_ ( self , UpperCamelCase=0 ) -> Optional[Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) __lowerCAmelCase = torch.device(F'''cuda:{gpu_id}''' ) __lowerCAmelCase = [ self.unet, self.text_encoder, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCamelCase , UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase=0 ) -> Any: if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." ) __lowerCAmelCase = torch.device(F'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=UpperCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) __lowerCAmelCase = None for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]: __lowerCAmelCase , __lowerCAmelCase = cpu_offload_with_hook(UpperCamelCase , UpperCamelCase , prev_module_hook=UpperCamelCase ) if self.safety_checker is not None: __lowerCAmelCase , __lowerCAmelCase = cpu_offload_with_hook(self.safety_checker , UpperCamelCase , prev_module_hook=UpperCamelCase ) # We'll offload the last model manually. 
__lowerCAmelCase = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def UpperCAmelCase_ ( self ) -> Dict: if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(UpperCamelCase , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(UpperCamelCase ) def __call__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = 512 , UpperCamelCase = 512 , UpperCamelCase = 100 , UpperCamelCase = 4.0 , UpperCamelCase = 1 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = "pil" , UpperCamelCase = True , ) -> int: if isinstance(UpperCamelCase , UpperCamelCase ): __lowerCAmelCase = 1 elif isinstance(UpperCamelCase , UpperCamelCase ): __lowerCAmelCase = len(UpperCamelCase ) else: raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase )}''' ) __lowerCAmelCase = self._execution_device __lowerCAmelCase = batch_size * num_images_per_prompt __lowerCAmelCase = guidance_scale > 1.0 __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._encode_prompt( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ): __lowerCAmelCase = torch.cat(UpperCamelCase , dim=0 ) if isinstance(UpperCamelCase , UpperCamelCase ): __lowerCAmelCase = torch.cat(UpperCamelCase , dim=0 ) if do_classifier_free_guidance: __lowerCAmelCase = image_embeds.repeat_interleave(UpperCamelCase , dim=0 ) __lowerCAmelCase = negative_image_embeds.repeat_interleave(UpperCamelCase , dim=0 ) __lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to( dtype=prompt_embeds.dtype , device=UpperCamelCase ) self.scheduler.set_timesteps(UpperCamelCase , device=UpperCamelCase ) __lowerCAmelCase = self.scheduler.timesteps __lowerCAmelCase = self.unet.config.in_channels __lowerCAmelCase , __lowerCAmelCase = get_new_h_w(UpperCamelCase , UpperCamelCase , self.movq_scale_factor ) # create initial latent __lowerCAmelCase = self.prepare_latents( (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , UpperCamelCase , UpperCamelCase , UpperCamelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance __lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __lowerCAmelCase = {"text_embeds": prompt_embeds, "image_embeds": image_embeds} __lowerCAmelCase = self.unet( sample=UpperCamelCase , timestep=UpperCamelCase , encoder_hidden_states=UpperCamelCase , added_cond_kwargs=UpperCamelCase , return_dict=UpperCamelCase , )[0] if do_classifier_free_guidance: __lowerCAmelCase , __lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) __lowerCAmelCase , __lowerCAmelCase = noise_pred.chunk(2 ) __lowerCAmelCase , __lowerCAmelCase = variance_pred.chunk(2 ) __lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) __lowerCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): __lowerCAmelCase , __lowerCAmelCase = 
noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 __lowerCAmelCase = self.scheduler.step( UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase , ).prev_sample # post-processing __lowerCAmelCase = self.movq.decode(UpperCamelCase , force_not_quantize=UpperCamelCase )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: __lowerCAmelCase = image * 0.5 + 0.5 __lowerCAmelCase = image.clamp(0 , 1 ) __lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __lowerCAmelCase = self.numpy_to_pil(UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase )
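
# A compact sketch of the classifier-free guidance arithmetic in the denoising loop
# above: run the U-Net once on a doubled batch, split the result, and extrapolate from
# the unconditional prediction toward the text-conditioned one. Shapes are illustrative.
import torch

guidance_scale = 4.0
noise_pred = torch.randn(2, 4, 8, 8)  # doubled batch: [unconditional, text-conditioned]
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 8, 8)
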
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase : str = { '''vocab_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt''' ), '''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''', '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase : Optional[Any] = { '''squeezebert/squeezebert-uncased''': 5_1_2, '''squeezebert/squeezebert-mnli''': 5_1_2, '''squeezebert/squeezebert-mnli-headless''': 5_1_2, } lowerCAmelCase : Tuple = { '''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True}, } class UpperCAmelCase__ ( UpperCamelCase__ ): a : Dict = VOCAB_FILES_NAMES a : Any = PRETRAINED_VOCAB_FILES_MAP a : Dict = PRETRAINED_INIT_CONFIGURATION a : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a : Optional[Any] = SqueezeBertTokenizer def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase="[UNK]" , UpperCamelCase="[SEP]" , UpperCamelCase="[PAD]" , UpperCamelCase="[CLS]" , UpperCamelCase="[MASK]" , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ) -> List[Any]: super().__init__( UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , ) __lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , UpperCamelCase ) != do_lower_case or normalizer_state.get("strip_accents" , UpperCamelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCamelCase ) != tokenize_chinese_chars ): __lowerCAmelCase = getattr(UpperCamelCase , normalizer_state.pop("type" ) ) __lowerCAmelCase = do_lower_case __lowerCAmelCase = strip_accents __lowerCAmelCase = tokenize_chinese_chars __lowerCAmelCase = normalizer_class(**UpperCamelCase ) __lowerCAmelCase = do_lower_case def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=None ) -> str: __lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]: __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = 
[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> Tuple[str]: __lowerCAmelCase = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase ) return tuple(UpperCamelCase )
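
# A plain-Python sketch of the sentence-pair layout the tokenizer methods above build:
# [CLS] A [SEP] B [SEP], with token_type_ids 0 over the first segment and 1 over the
# second. The ids are illustrative, not read from a real vocabulary.
cls_id, sep_id = 101, 102
token_ids_a = [7, 8, 9]
token_ids_b = [10, 11]

input_ids = [cls_id] + token_ids_a + [sep_id] + token_ids_b + [sep_id]
token_type_ids = [0] * (len(token_ids_a) + 2) + [1] * (len(token_ids_b) + 1)
assert len(input_ids) == len(token_type_ids) == 8
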
'''simple docstring'''
import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
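
# A minimal Euler-Maruyama sketch of the reverse-SDE update performed by `step_pred`
# above: the mean follows the drift (including the score term), and fresh noise scaled
# by the diffusion coefficient is re-injected. The beta value, score tensor, and shape
# are all made up for illustration.
import math

import torch

x = torch.randn(1, 3, 8, 8)
score = torch.randn_like(x)  # stand-in for the rescaled model output
beta_t = 0.1                 # hypothetical constant; the scheduler derives it from t
dt = -1.0 / 1000             # negative step: integrating from t = 1 down toward 0

diffusion = math.sqrt(beta_t)
drift = -0.5 * beta_t * x - diffusion**2 * score
x_mean = x + drift * dt                                        # deterministic part
x = x_mean + diffusion * math.sqrt(-dt) * torch.randn_like(x)  # re-injected noise
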
'''simple docstring'''
from __future__ import annotations


def mean(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''


def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin test for n below the last bound; probabilistic above it."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and a remaining odd component;
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True  # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and n MUST be composite
        return False
    return True


def test_miller_rabin() -> None:
    """Composite/prime pairs around each deterministic bound."""
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
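
# A short worked example of the decomposition and witness test above, using the
# Carmichael number 561 (which fools the plain Fermat test): 560 = 35 * 2**4, and
# base 2 already certifies compositeness under Miller-Rabin.
n = 561
d, s = n - 1, 0
while d % 2 == 0:
    d //= 2
    s += 1
assert (d, s) == (35, 4)

passes_base_2 = pow(2, d, n) == 1 or any(pow(2, d * 2**r, n) == n - 1 for r in range(s))
assert not passes_base_2  # base 2 is a witness that 561 is composite
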
'''simple docstring'''
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"
        r"7(0|1|2|4|5|6|7|8)"
        r"(-| |)"
        r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
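
# A few illustrative calls against the validator defined above; 073 is not an accepted
# mobile prefix in the pattern, so the last number is rejected.
assert is_sri_lankan_phone_number("+94773283048")
assert is_sri_lankan_phone_number("0718382399")
assert not is_sri_lankan_phone_number("0731234567")
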
'''simple docstring''' from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): a : Optional[int] = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""] @register_to_config def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = 5_0257 , UpperCamelCase = 1024 , UpperCamelCase = 768 , UpperCamelCase = 12 , UpperCamelCase = 12 , UpperCamelCase = None , UpperCamelCase = "gelu_new" , UpperCamelCase = 0.1 , UpperCamelCase = 0.1 , UpperCamelCase = 0.1 , UpperCamelCase = 1E-5 , UpperCamelCase = 0.02 , UpperCamelCase = True , UpperCamelCase = True , UpperCamelCase = False , UpperCamelCase = False , ) -> Tuple: super().__init__() __lowerCAmelCase = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' F''' `n_embd`: {n_embd} are not equal.''' ) __lowerCAmelCase = prefix_inner_dim __lowerCAmelCase = prefix_hidden_dim __lowerCAmelCase = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) __lowerCAmelCase = ( nn.Linear(self.prefix_hidden_dim , UpperCamelCase ) if self.prefix_hidden_dim is not None else nn.Identity() ) __lowerCAmelCase = GPTaConfig( vocab_size=UpperCamelCase , n_positions=UpperCamelCase , n_embd=UpperCamelCase , n_layer=UpperCamelCase , n_head=UpperCamelCase , n_inner=UpperCamelCase , activation_function=UpperCamelCase , resid_pdrop=UpperCamelCase , embd_pdrop=UpperCamelCase , attn_pdrop=UpperCamelCase , layer_norm_epsilon=UpperCamelCase , initializer_range=UpperCamelCase , scale_attn_weights=UpperCamelCase , use_cache=UpperCamelCase , scale_attn_by_inverse_layer_idx=UpperCamelCase , reorder_and_upcast_attn=UpperCamelCase , ) __lowerCAmelCase = GPTaLMHeadModel(UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , ) -> str: __lowerCAmelCase = self.transformer.transformer.wte(UpperCamelCase ) __lowerCAmelCase = self.encode_prefix(UpperCamelCase ) __lowerCAmelCase = self.decode_prefix(UpperCamelCase ) __lowerCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: __lowerCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) __lowerCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 ) __lowerCAmelCase = self.transformer(inputs_embeds=UpperCamelCase , labels=UpperCamelCase , attention_mask=UpperCamelCase ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> torch.Tensor: return torch.zeros(UpperCamelCase , self.prefix_length , dtype=torch.intaa , device=UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[Any]: return self.encode_prefix(UpperCamelCase ) @torch.no_grad() def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: __lowerCAmelCase = torch.split(UpperCamelCase , 1 , dim=0 ) __lowerCAmelCase = [] __lowerCAmelCase = [] for feature in features: __lowerCAmelCase = self.decode_prefix(feature.to(UpperCamelCase ) ) # back to the clip feature # Only support beam 
search for now __lowerCAmelCase , __lowerCAmelCase = self.generate_beam( input_embeds=UpperCamelCase , device=UpperCamelCase , eos_token_id=UpperCamelCase ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) __lowerCAmelCase = torch.stack(UpperCamelCase ) __lowerCAmelCase = torch.stack(UpperCamelCase ) return generated_tokens, generated_seq_lengths @torch.no_grad() def UpperCAmelCase_ ( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase = 5 , UpperCamelCase = 67 , UpperCamelCase = 1.0 , UpperCamelCase = None , ) -> str: __lowerCAmelCase = eos_token_id __lowerCAmelCase = None __lowerCAmelCase = None __lowerCAmelCase = torch.ones(UpperCamelCase , device=UpperCamelCase , dtype=torch.int ) __lowerCAmelCase = torch.zeros(UpperCamelCase , device=UpperCamelCase , dtype=torch.bool ) if input_embeds is not None: __lowerCAmelCase = input_embeds else: __lowerCAmelCase = self.transformer.transformer.wte(UpperCamelCase ) for i in range(UpperCamelCase ): __lowerCAmelCase = self.transformer(inputs_embeds=UpperCamelCase ) __lowerCAmelCase = outputs.logits __lowerCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) __lowerCAmelCase = logits.softmax(-1 ).log() if scores is None: __lowerCAmelCase , __lowerCAmelCase = logits.topk(UpperCamelCase , -1 ) __lowerCAmelCase = generated.expand(UpperCamelCase , *generated.shape[1:] ) __lowerCAmelCase , __lowerCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: __lowerCAmelCase = next_tokens else: __lowerCAmelCase = tokens.expand(UpperCamelCase , *tokens.shape[1:] ) __lowerCAmelCase = torch.cat((tokens, next_tokens) , dim=1 ) else: __lowerCAmelCase = -float(np.inf ) __lowerCAmelCase = 0 __lowerCAmelCase = scores[:, None] + logits seq_lengths[~is_stopped] += 1 __lowerCAmelCase = scores_sum / seq_lengths[:, None] __lowerCAmelCase , __lowerCAmelCase = scores_sum_average.view(-1 ).topk(UpperCamelCase , -1 ) __lowerCAmelCase = next_tokens // scores_sum.shape[1] __lowerCAmelCase = seq_lengths[next_tokens_source] __lowerCAmelCase = next_tokens % scores_sum.shape[1] __lowerCAmelCase = next_tokens.unsqueeze(1 ) __lowerCAmelCase = tokens[next_tokens_source] __lowerCAmelCase = torch.cat((tokens, next_tokens) , dim=1 ) __lowerCAmelCase = generated[next_tokens_source] __lowerCAmelCase = scores_sum_average * seq_lengths __lowerCAmelCase = is_stopped[next_tokens_source] __lowerCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) __lowerCAmelCase = torch.cat((generated, next_token_embed) , dim=1 ) __lowerCAmelCase = is_stopped + next_tokens.eq(UpperCamelCase ).squeeze() if is_stopped.all(): break __lowerCAmelCase = scores / seq_lengths __lowerCAmelCase = scores.argsort(descending=UpperCamelCase ) # tokens tensors are already padded to max_seq_length __lowerCAmelCase = [tokens[i] for i in order] __lowerCAmelCase = torch.stack(UpperCamelCase , dim=0 ) __lowerCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
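
# A minimal, self-contained sketch of the prefix-captioning idea the model above
# implements: map an image feature to a short sequence of pseudo-token embeddings and
# prepend it to the word embeddings fed to the language model. The single linear
# projection and all sizes here are illustrative simplifications, not the module's
# actual encode/decode prefix layers.
import torch
from torch import nn

clip_dim, prefix_length, n_embd = 512, 10, 768
project = nn.Linear(clip_dim, prefix_length * n_embd)  # hypothetical stand-in projection

image_feature = torch.randn(1, clip_dim)
prefix_embeds = project(image_feature).view(1, prefix_length, n_embd)
token_embeds = torch.randn(1, 20, n_embd)  # stands in for transformer.wte(input_ids)
inputs_embeds = torch.cat((prefix_embeds, token_embeds), dim=1)
assert inputs_embeds.shape == (1, prefix_length + 20, n_embd)
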
'''simple docstring''' import os import sys import unittest lowerCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowerCAmelCase : Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''') lowerCAmelCase : Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''') class UpperCAmelCase__ ( unittest.TestCase ): def UpperCAmelCase_ ( self ) -> str: __lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase ) __lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase ) __lowerCAmelCase = {"BertModelTest": "BertModelTester"} __lowerCAmelCase = { "BlipModelTest": "BlipModelTester", "BlipTextImageModelTest": "BlipTextImageModelsModelTester", "BlipTextModelTest": "BlipTextModelTester", "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester", "BlipVQAModelTest": "BlipVQAModelTester", "BlipVisionModelTest": "BlipVisionModelTester", } self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase ) __lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase ) __lowerCAmelCase = { "BertForMaskedLM": ["BertModelTest"], "BertForMultipleChoice": ["BertModelTest"], "BertForNextSentencePrediction": ["BertModelTest"], "BertForPreTraining": ["BertModelTest"], "BertForQuestionAnswering": ["BertModelTest"], "BertForSequenceClassification": ["BertModelTest"], "BertForTokenClassification": ["BertModelTest"], "BertLMHeadModel": ["BertModelTest"], "BertModel": ["BertModelTest"], } __lowerCAmelCase = { "BlipForConditionalGeneration": ["BlipTextImageModelTest"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"], "BlipForQuestionAnswering": ["BlipVQAModelTest"], "BlipModel": ["BlipModelTest"], "BlipTextModel": ["BlipTextModelTest"], "BlipVisionModel": ["BlipVisionModelTest"], } self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) def UpperCAmelCase_ ( self ) -> str: __lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase ) __lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase ) __lowerCAmelCase = { "BertForMaskedLM": ["BertModelTester"], "BertForMultipleChoice": ["BertModelTester"], "BertForNextSentencePrediction": ["BertModelTester"], "BertForPreTraining": ["BertModelTester"], "BertForQuestionAnswering": ["BertModelTester"], "BertForSequenceClassification": ["BertModelTester"], "BertForTokenClassification": ["BertModelTester"], "BertLMHeadModel": ["BertModelTester"], "BertModel": ["BertModelTester"], } __lowerCAmelCase = { "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"], "BlipForQuestionAnswering": ["BlipVQAModelTester"], "BlipModel": ["BlipModelTester"], "BlipTextModel": ["BlipTextModelTester"], "BlipVisionModel": ["BlipVisionModelTester"], } self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
39
0
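The beam bookkeeping above ranks hypotheses by average log-probability (scores_sum / seq_lengths) rather than the raw sum, so longer captions are not penalized merely for being longer. A minimal sketch of that normalization step with hypothetical toy values, not tensors from the model above:

import torch

# Hypothetical running state for three beams: summed log-probs and lengths.
scores_sum = torch.tensor([-2.0, -3.0, -4.0])
seq_lengths = torch.tensor([2.0, 4.0, 5.0])

# Per-token averages are [-1.0, -0.75, -0.8]: beam 1 wins despite beam 0
# having the smallest raw (unnormalized) total.
scores_avg = scores_sum / seq_lengths
print(scores_avg.argsort(descending=True))  # tensor([1, 2, 0])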
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer lowerCAmelCase : str = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase : Optional[int] = { '''vocab_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json''' ), }, '''merges_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt''' ), }, '''tokenizer_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''', '''roberta-base-openai-detector''': ( '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json''' ), '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase : str = { '''roberta-base''': 5_1_2, '''roberta-large''': 5_1_2, '''roberta-large-mnli''': 5_1_2, '''distilroberta-base''': 5_1_2, '''roberta-base-openai-detector''': 5_1_2, '''roberta-large-openai-detector''': 5_1_2, } class UpperCAmelCase__ ( UpperCamelCase__ ): a : List[str] = VOCAB_FILES_NAMES a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP a : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a : Any = ["""input_ids""", """attention_mask"""] a : Union[str, Any] = RobertaTokenizer def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="replace" , UpperCamelCase="<s>" , UpperCamelCase="</s>" , UpperCamelCase="</s>" , UpperCamelCase="<s>" , UpperCamelCase="<unk>" , UpperCamelCase="<pad>" , UpperCamelCase="<mask>" , UpperCamelCase=False , UpperCamelCase=True , **UpperCamelCase , ) -> Tuple: super().__init__( UpperCamelCase , UpperCamelCase , tokenizer_file=UpperCamelCase , errors=UpperCamelCase , bos_token=UpperCamelCase , 
eos_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase , **UpperCamelCase , ) __lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , UpperCamelCase ) != add_prefix_space: __lowerCAmelCase = getattr(UpperCamelCase , pre_tok_state.pop("type" ) ) __lowerCAmelCase = add_prefix_space __lowerCAmelCase = pre_tok_class(**UpperCamelCase ) __lowerCAmelCase = add_prefix_space __lowerCAmelCase = "post_processor" __lowerCAmelCase = getattr(self.backend_tokenizer , UpperCamelCase , UpperCamelCase ) if tokenizer_component_instance: __lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __lowerCAmelCase = tuple(state["sep"] ) if "cls" in state: __lowerCAmelCase = tuple(state["cls"] ) __lowerCAmelCase = False if state.get("add_prefix_space" , UpperCamelCase ) != add_prefix_space: __lowerCAmelCase = add_prefix_space __lowerCAmelCase = True if state.get("trim_offsets" , UpperCamelCase ) != trim_offsets: __lowerCAmelCase = trim_offsets __lowerCAmelCase = True if changes_to_apply: __lowerCAmelCase = getattr(UpperCamelCase , state.pop("type" ) ) __lowerCAmelCase = component_class(**UpperCamelCase ) setattr(self.backend_tokenizer , UpperCamelCase , UpperCamelCase ) @property def UpperCAmelCase_ ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def UpperCAmelCase_ ( self , UpperCamelCase ) -> int: __lowerCAmelCase = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else value __lowerCAmelCase = value def UpperCAmelCase_ ( self , *UpperCamelCase , **UpperCamelCase ) -> BatchEncoding: __lowerCAmelCase = kwargs.get("is_split_into_words" , UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*UpperCamelCase , **UpperCamelCase ) def UpperCAmelCase_ ( self , *UpperCamelCase , **UpperCamelCase ) -> BatchEncoding: __lowerCAmelCase = kwargs.get("is_split_into_words" , UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*UpperCamelCase , **UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> Tuple[str]: __lowerCAmelCase = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase ) return tuple(UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=None ) -> Dict: __lowerCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]: __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
717
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


# NOTE: class, field and argument names were obfuscated in the source; they are
# restored from the pinned output class name (TransformerTemporalModelOutput)
# and the diffusers temporal-transformer API this file follows.
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
39
0
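The temporal transformer's forward pass hinges on a reshape that turns every spatial position into a sequence over frames before attention. A standalone sketch of that shape gymnastics, using hypothetical small dimensions:

import torch

batch_size, num_frames, channel, height, width = 2, 4, 8, 3, 3
hidden_states = torch.randn(batch_size * num_frames, channel, height, width)

# (B*F, C, H, W) -> (B, F, C, H, W) -> (B, C, F, H, W)
x = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
x = x.permute(0, 2, 1, 3, 4)
# (B, C, F, H, W) -> (B, H, W, F, C) -> (B*H*W, F, C):
# each of the B*H*W spatial positions is now a length-F token sequence.
x = x.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
print(x.shape)  # torch.Size([18, 4, 8])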
'''simple docstring'''
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    # NOTE: class, method and constant names were obfuscated in the source;
    # descriptive names are restored here so the five tests stay distinct.
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_distil(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
718
'''simple docstring''' import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def __lowerCAmelCase ( lowerCamelCase : bytes , lowerCamelCase : int ): '''simple docstring''' __lowerCAmelCase = f'''{sampling_rate}''' __lowerCAmelCase = "1" __lowerCAmelCase = "f32le" __lowerCAmelCase = [ "ffmpeg", "-i", "pipe:0", "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-hide_banner", "-loglevel", "quiet", "pipe:1", ] try: with subprocess.Popen(lowerCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: __lowerCAmelCase = ffmpeg_process.communicate(lowerCamelCase ) except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error __lowerCAmelCase = output_stream[0] __lowerCAmelCase = np.frombuffer(lowerCamelCase , np.floataa ) if audio.shape[0] == 0: raise ValueError("Malformed soundfile" ) return audio def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : str = "f32le" , ): '''simple docstring''' __lowerCAmelCase = f'''{sampling_rate}''' __lowerCAmelCase = "1" if format_for_conversion == "s16le": __lowerCAmelCase = 2 elif format_for_conversion == "f32le": __lowerCAmelCase = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) __lowerCAmelCase = platform.system() if system == "Linux": __lowerCAmelCase = "alsa" __lowerCAmelCase = "default" elif system == "Darwin": __lowerCAmelCase = "avfoundation" __lowerCAmelCase = ":0" elif system == "Windows": __lowerCAmelCase = "dshow" __lowerCAmelCase = "default" __lowerCAmelCase = [ "ffmpeg", "-f", format_, "-i", input_, "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-fflags", "nobuffer", "-hide_banner", "-loglevel", "quiet", "pipe:1", ] __lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample __lowerCAmelCase = _ffmpeg_stream(lowerCamelCase , lowerCamelCase ) for item in iterator: yield item def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Union[Tuple[float, float], float]] = None , lowerCamelCase : str = "f32le" , ): '''simple docstring''' if stream_chunk_s is not None: __lowerCAmelCase = stream_chunk_s else: __lowerCAmelCase = chunk_length_s __lowerCAmelCase = ffmpeg_microphone(lowerCamelCase , lowerCamelCase , format_for_conversion=lowerCamelCase ) if format_for_conversion == "s16le": __lowerCAmelCase = np.intaa __lowerCAmelCase = 2 elif format_for_conversion == "f32le": __lowerCAmelCase = np.floataa __lowerCAmelCase = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) if stride_length_s is None: __lowerCAmelCase = chunk_length_s / 6 __lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowerCamelCase , (int, float) ): __lowerCAmelCase = [stride_length_s, stride_length_s] __lowerCAmelCase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample __lowerCAmelCase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample __lowerCAmelCase = datetime.datetime.now() __lowerCAmelCase = datetime.timedelta(seconds=lowerCamelCase ) for item in chunk_bytes_iter(lowerCamelCase , lowerCamelCase , stride=(stride_left, stride_right) , stream=lowerCamelCase ): # Put everything back in numpy scale __lowerCAmelCase = np.frombuffer(item["raw"] , dtype=lowerCamelCase ) __lowerCAmelCase = ( item["stride"][0] // size_of_sample, item["stride"][1] // size_of_sample, ) __lowerCAmelCase = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def __lowerCAmelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Tuple[int, int] , lowerCamelCase : bool = False ): '''simple docstring''' __lowerCAmelCase = B"" __lowerCAmelCase , __lowerCAmelCase = stride if stride_left + stride_right >= chunk_len: raise ValueError( f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) __lowerCAmelCase = 0 for raw in iterator: acc += raw if stream and len(lowerCamelCase ) < chunk_len: __lowerCAmelCase = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowerCamelCase ) >= chunk_len: # We are flushing the accumulator __lowerCAmelCase = (_stride_left, stride_right) __lowerCAmelCase = {"raw": acc[:chunk_len], "stride": stride} if stream: __lowerCAmelCase = False yield item __lowerCAmelCase = stride_left __lowerCAmelCase = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowerCamelCase ) > stride_left: __lowerCAmelCase = {"raw": acc, "stride": (_stride_left, 0)} if stream: __lowerCAmelCase = False yield item def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int ): '''simple docstring''' __lowerCAmelCase = 2**24 # 16Mo try: with subprocess.Popen(lowerCamelCase , stdout=subprocess.PIPE , bufsize=lowerCamelCase ) as ffmpeg_process: while True: __lowerCAmelCase = ffmpeg_process.stdout.read(lowerCamelCase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
39
0
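The microphone helpers size their reads in bytes, not samples: one chunk is sampling_rate * chunk_length_s samples, times the per-sample width of the requested ffmpeg format. A quick sanity check of that arithmetic with hypothetical parameters:

# Hypothetical streaming parameters ("f32le" is 4 bytes per sample).
sampling_rate = 16_000
chunk_length_s = 2.0
size_of_sample = 4

chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
print(chunk_len)  # 128000 bytes of raw audio per 2-second float32 mono chunk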
'''simple docstring'''
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class MetricTester(unittest.TestCase):
    # NOTE: class and method names were obfuscated in the source; descriptive
    # names (setUp, test_*) are restored so unittest can discover each test.
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
719
'''simple docstring'''
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
39
0
'''simple docstring'''
import unittest

from transformers import DonutProcessor


DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    # NOTE: method names were obfuscated in the source; setUp/test_* restored.
    # "tokenajson" is the corpus's digit mangling of the real token2json method.
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )

        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
720
'''simple docstring'''


def count_divisors(n):
    # Trial division: for each prime factor i with multiplicity m,
    # the divisor count gains a factor of (m + 1).
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    # Walk the triangular numbers 1, 3, 6, 10, ... until one has
    # more than 500 divisors.
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
39
0
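count_divisors above relies on the standard identity that a number with prime factorization p1^e1 * ... * pk^ek has (e1 + 1) * ... * (ek + 1) divisors. A self-contained check of that identity; the helper name here is ours, not from the file above:

from math import prod

def divisor_count_from_exponents(exponents):
    # d(n) = product of (e_i + 1) over the prime exponents of n.
    return prod(e + 1 for e in exponents)

# 28 = 2^2 * 7^1 -> (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28.
assert divisor_count_from_exponents([2, 1]) == 6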
'''simple docstring''' import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": lowerCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( '''--original_config_file''', default=None, type=str, help='''The YAML config file corresponding to the original architecture.''', ) parser.add_argument( '''--num_in_channels''', default=None, type=int, help='''The number of input channels. If `None` number of input channels will be automatically inferred.''', ) parser.add_argument( '''--scheduler_type''', default='''pndm''', type=str, help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''', ) parser.add_argument( '''--pipeline_type''', default=None, type=str, help=( '''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'''' '''. If `None` pipeline will be automatically inferred.''' ), ) parser.add_argument( '''--image_size''', default=None, type=int, help=( '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2''' ''' Base. Use 768 for Stable Diffusion v2.''' ), ) parser.add_argument( '''--prediction_type''', default=None, type=str, help=( '''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable''' ''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.''' ), ) parser.add_argument( '''--extract_ema''', action='''store_true''', help=( '''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights''' ''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield''' ''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.''' ), ) parser.add_argument( '''--upcast_attention''', action='''store_true''', help=( '''Whether the attention computation should always be upcasted. This is necessary when running stable''' ''' diffusion 2.1.''' ), ) parser.add_argument( '''--from_safetensors''', action='''store_true''', help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''', ) parser.add_argument( '''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''') parser.add_argument( '''--stable_unclip''', type=str, default=None, required=False, help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''', ) parser.add_argument( '''--stable_unclip_prior''', type=str, default=None, required=False, help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''', ) parser.add_argument( '''--clip_stats_path''', type=str, help='''Path to the clip stats file. 
Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''', required=False, ) parser.add_argument( '''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.''' ) parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''') parser.add_argument( '''--vae_path''', type=str, default=None, required=False, help='''Set to a path, hub id to an already converted vae to not convert it again.''', ) lowerCAmelCase : Union[str, Any] = parser.parse_args() lowerCAmelCase : List[str] = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
721
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class DPRConfig(PretrainedConfig):
    # NOTE: parameter and attribute names were obfuscated in the source; they
    # are restored from the right-hand sides of the original assignments.
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
39
0
'''simple docstring'''
from math import factorial


def combinations(n: int, k: int) -> int:
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
700
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase : Union[str, Any] = { '''configuration_distilbert''': [ '''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DistilBertConfig''', '''DistilBertOnnxConfig''', ], '''tokenization_distilbert''': ['''DistilBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple = ['''DistilBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple = [ '''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DistilBertForMaskedLM''', '''DistilBertForMultipleChoice''', '''DistilBertForQuestionAnswering''', '''DistilBertForSequenceClassification''', '''DistilBertForTokenClassification''', '''DistilBertModel''', '''DistilBertPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[str] = [ '''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDistilBertForMaskedLM''', '''TFDistilBertForMultipleChoice''', '''TFDistilBertForQuestionAnswering''', '''TFDistilBertForSequenceClassification''', '''TFDistilBertForTokenClassification''', '''TFDistilBertMainLayer''', '''TFDistilBertModel''', '''TFDistilBertPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[int] = [ '''FlaxDistilBertForMaskedLM''', '''FlaxDistilBertForMultipleChoice''', '''FlaxDistilBertForQuestionAnswering''', '''FlaxDistilBertForSequenceClassification''', '''FlaxDistilBertForTokenClassification''', '''FlaxDistilBertModel''', '''FlaxDistilBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, 
FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
39
0
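The combinations helper above is the plain factorial formula n! / (k! * (n - k)!). A quick cross-check against the standard library (math.comb is available from Python 3.8):

from math import comb, factorial

def combinations(n: int, k: int) -> int:
    # Same formula as in the file above.
    return factorial(n) // (factorial(k) * factorial(n - k))

assert combinations(52, 5) == comb(52, 5) == 2_598_960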
import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase : str = logging.get_logger(__name__) lowerCAmelCase : int = {'''vocab_file''': '''sentencepiece.model'''} lowerCAmelCase : Tuple = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, } lowerCAmelCase : Optional[int] = { '''google/rembert''': 2_5_6, } class UpperCAmelCase__ ( UpperCamelCase__ ): a : List[Any] = VOCAB_FILES_NAMES a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP a : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , UpperCamelCase , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase="[CLS]" , UpperCamelCase="[SEP]" , UpperCamelCase="[UNK]" , UpperCamelCase="[SEP]" , UpperCamelCase="[PAD]" , UpperCamelCase="[CLS]" , UpperCamelCase="[MASK]" , **UpperCamelCase , ) -> Union[str, Any]: super().__init__( do_lower_case=UpperCamelCase , remove_space=UpperCamelCase , keep_accents=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , **UpperCamelCase , ) __lowerCAmelCase = do_lower_case __lowerCAmelCase = remove_space __lowerCAmelCase = keep_accents __lowerCAmelCase = vocab_file __lowerCAmelCase = spm.SentencePieceProcessor() self.sp_model.Load(UpperCamelCase ) @property def UpperCAmelCase_ ( self ) -> Optional[Any]: return len(self.sp_model ) def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> List[str]: __lowerCAmelCase = self.__dict__.copy() __lowerCAmelCase = None return state def __setstate__( self , UpperCamelCase ) -> Optional[Any]: __lowerCAmelCase = d __lowerCAmelCase = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=False ) -> Any: __lowerCAmelCase = self.sp_model.EncodeAsPieces(UpperCamelCase ) return pieces def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[Any]: return self.sp_model.PieceToId(UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase ) -> Tuple: return self.sp_model.IdToPiece(UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase ) -> str: __lowerCAmelCase = self.sp_model.decode_pieces(UpperCamelCase ) return out_string def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]: __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1] return [1] + ([0] * len(UpperCamelCase )) + [1] def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]: __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase ): logger.error("Vocabulary path ({}) should be a directory".format(UpperCamelCase ) ) return __lowerCAmelCase = os.path.join( UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ): copyfile(self.vocab_file , UpperCamelCase ) return (out_vocab_file,)
701
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] ): '''simple docstring''' assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __lowerCAmelCase ( lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Dict ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_3": "float64", "col_1": "string", "col_2": "int64"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Any ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_3": "float64", "col_1": "string", "col_2": "int64"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowerCAmelCase = {"col_2": "int64", "col_3": "float64", "col_1": "string"} __lowerCAmelCase = features.copy() __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = tmp_path / "cache" 
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , split=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] ): '''simple docstring''' if issubclass(lowerCamelCase , lowerCamelCase ): __lowerCAmelCase = jsonl_path elif issubclass(lowerCamelCase , lowerCamelCase ): __lowerCAmelCase = [jsonl_path] __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : str=("train",) ): '''simple docstring''' assert isinstance(lowerCamelCase , lowerCamelCase ) for split in splits: __lowerCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __lowerCAmelCase ( lowerCamelCase : Dict , lowerCamelCase : int , lowerCamelCase : List[str] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read() _check_json_datasetdict(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Dict ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_datasetdict(lowerCamelCase , lowerCamelCase ) 
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : int ): '''simple docstring''' if split: __lowerCAmelCase = {split: jsonl_path} else: __lowerCAmelCase = "train" __lowerCAmelCase = {"train": jsonl_path, "test": jsonl_path} __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_datasetdict(lowerCamelCase , lowerCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __lowerCAmelCase ( lowerCamelCase : Optional[Any] ): '''simple docstring''' return json.load(lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : Tuple ): '''simple docstring''' return [json.loads(lowerCamelCase ) for line in buffer] class UpperCAmelCase__ : @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase ).write() buffer.seek(0 ) __lowerCAmelCase = load_json_function(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) assert isinstance(exported_content[0] , UpperCamelCase ) assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase ).write() buffer.seek(0 ) __lowerCAmelCase = load_json(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , num_proc=2 ).write() buffer.seek(0 ) __lowerCAmelCase = load_json_function(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) assert isinstance(exported_content[0] , UpperCamelCase ) assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> 
Union[str, Any]: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase , num_proc=2 ).write() buffer.seek(0 ) __lowerCAmelCase = load_json(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase ) == 10 def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any: with pytest.raises(UpperCamelCase ): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , num_proc=0 ) @pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple: __lowerCAmelCase = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}''' __lowerCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(UpperCamelCase , UpperCamelCase , compression=UpperCamelCase ).write() with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f: __lowerCAmelCase = f.read() with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f: __lowerCAmelCase = f.read() assert exported_content == original_content
39
0
'''simple docstring''' import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def __lowerCAmelCase ( lowerCamelCase : bytes , lowerCamelCase : int ): '''simple docstring''' __lowerCAmelCase = f'''{sampling_rate}''' __lowerCAmelCase = "1" __lowerCAmelCase = "f32le" __lowerCAmelCase = [ "ffmpeg", "-i", "pipe:0", "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-hide_banner", "-loglevel", "quiet", "pipe:1", ] try: with subprocess.Popen(lowerCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: __lowerCAmelCase = ffmpeg_process.communicate(lowerCamelCase ) except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error __lowerCAmelCase = output_stream[0] __lowerCAmelCase = np.frombuffer(lowerCamelCase , np.floataa ) if audio.shape[0] == 0: raise ValueError("Malformed soundfile" ) return audio def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : str = "f32le" , ): '''simple docstring''' __lowerCAmelCase = f'''{sampling_rate}''' __lowerCAmelCase = "1" if format_for_conversion == "s16le": __lowerCAmelCase = 2 elif format_for_conversion == "f32le": __lowerCAmelCase = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) __lowerCAmelCase = platform.system() if system == "Linux": __lowerCAmelCase = "alsa" __lowerCAmelCase = "default" elif system == "Darwin": __lowerCAmelCase = "avfoundation" __lowerCAmelCase = ":0" elif system == "Windows": __lowerCAmelCase = "dshow" __lowerCAmelCase = "default" __lowerCAmelCase = [ "ffmpeg", "-f", format_, "-i", input_, "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-fflags", "nobuffer", "-hide_banner", "-loglevel", "quiet", "pipe:1", ] __lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample __lowerCAmelCase = _ffmpeg_stream(lowerCamelCase , lowerCamelCase ) for item in iterator: yield item def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Union[Tuple[float, float], float]] = None , lowerCamelCase : str = "f32le" , ): '''simple docstring''' if stream_chunk_s is not None: __lowerCAmelCase = stream_chunk_s else: __lowerCAmelCase = chunk_length_s __lowerCAmelCase = ffmpeg_microphone(lowerCamelCase , lowerCamelCase , format_for_conversion=lowerCamelCase ) if format_for_conversion == "s16le": __lowerCAmelCase = np.intaa __lowerCAmelCase = 2 elif format_for_conversion == "f32le": __lowerCAmelCase = np.floataa __lowerCAmelCase = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) if stride_length_s is None: __lowerCAmelCase = chunk_length_s / 6 __lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowerCamelCase , (int, float) ): __lowerCAmelCase = [stride_length_s, stride_length_s] __lowerCAmelCase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample __lowerCAmelCase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample __lowerCAmelCase = datetime.datetime.now() __lowerCAmelCase = datetime.timedelta(seconds=lowerCamelCase ) for item in chunk_bytes_iter(lowerCamelCase , lowerCamelCase , stride=(stride_left, stride_right) , stream=lowerCamelCase ): # Put everything back in numpy scale __lowerCAmelCase = np.frombuffer(item["raw"] , dtype=lowerCamelCase ) __lowerCAmelCase = ( item["stride"][0] // size_of_sample, item["stride"][1] // size_of_sample, ) __lowerCAmelCase = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def __lowerCAmelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Tuple[int, int] , lowerCamelCase : bool = False ): '''simple docstring''' __lowerCAmelCase = B"" __lowerCAmelCase , __lowerCAmelCase = stride if stride_left + stride_right >= chunk_len: raise ValueError( f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) __lowerCAmelCase = 0 for raw in iterator: acc += raw if stream and len(lowerCamelCase ) < chunk_len: __lowerCAmelCase = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowerCamelCase ) >= chunk_len: # We are flushing the accumulator __lowerCAmelCase = (_stride_left, stride_right) __lowerCAmelCase = {"raw": acc[:chunk_len], "stride": stride} if stream: __lowerCAmelCase = False yield item __lowerCAmelCase = stride_left __lowerCAmelCase = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowerCamelCase ) > stride_left: __lowerCAmelCase = {"raw": acc, "stride": (_stride_left, 0)} if stream: __lowerCAmelCase = False yield item def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int ): '''simple docstring''' __lowerCAmelCase = 2**24 # 16Mo try: with subprocess.Popen(lowerCamelCase , stdout=subprocess.PIPE , bufsize=lowerCamelCase ) as ffmpeg_process: while True: __lowerCAmelCase = ffmpeg_process.stdout.read(lowerCamelCase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
702
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


# NOTE: the assignment targets were obfuscated in the source; they are restored
# from the _import_structure reference in the _LazyModule call below.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
39
0
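chunk_bytes_iter above emits overlapping windows: after each full chunk it keeps the last stride_left + stride_right bytes, so consecutive chunks advance by chunk_len - stride_left - stride_right. A toy, non-streaming rendering of that overlap arithmetic; it ignores the partial flags and per-chunk stride tuples the real generator attaches:

chunk_len, stride_left, stride_right = 8, 2, 2
data = bytes(range(20))

step = chunk_len - stride_left - stride_right  # 4: each window advances by this
windows = [data[s : s + chunk_len] for s in range(0, len(data) - chunk_len + 1, step)]
print([w.hex() for w in windows])  # four overlapping 8-byte windows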
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase : str = { '''vocab_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt''' ), '''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''', '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase : Optional[Any] = { '''squeezebert/squeezebert-uncased''': 5_1_2, '''squeezebert/squeezebert-mnli''': 5_1_2, '''squeezebert/squeezebert-mnli-headless''': 5_1_2, } lowerCAmelCase : Tuple = { '''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True}, } class UpperCAmelCase__ ( UpperCamelCase__ ): a : Dict = VOCAB_FILES_NAMES a : Any = PRETRAINED_VOCAB_FILES_MAP a : Dict = PRETRAINED_INIT_CONFIGURATION a : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a : Optional[Any] = SqueezeBertTokenizer def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase="[UNK]" , UpperCamelCase="[SEP]" , UpperCamelCase="[PAD]" , UpperCamelCase="[CLS]" , UpperCamelCase="[MASK]" , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ) -> List[Any]: super().__init__( UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , ) __lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , UpperCamelCase ) != do_lower_case or normalizer_state.get("strip_accents" , UpperCamelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCamelCase ) != tokenize_chinese_chars ): __lowerCAmelCase = getattr(UpperCamelCase , normalizer_state.pop("type" ) ) __lowerCAmelCase = do_lower_case __lowerCAmelCase = strip_accents __lowerCAmelCase = tokenize_chinese_chars __lowerCAmelCase = normalizer_class(**UpperCamelCase ) __lowerCAmelCase = do_lower_case def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=None ) -> str: __lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]: __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = 
[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> Tuple[str]: __lowerCAmelCase = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase ) return tuple(UpperCamelCase )
703
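create_token_type_ids_from_sequences above assigns segment id 0 to the [CLS] A [SEP] span and segment id 1 to the B [SEP] span. A standalone check of just that length arithmetic (plain Python, no tokenizer required):

def token_type_ids(len_a, len_b=None):
    # [CLS] tokens_a [SEP] -> segment 0; tokens_b [SEP] -> segment 1
    ids = [0] * (1 + len_a + 1)
    if len_b is not None:
        ids += [1] * (len_b + 1)
    return ids

assert token_type_ids(3) == [0] * 5
assert token_type_ids(2, 3) == [0, 0, 0, 0, 1, 1, 1, 1]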
'''simple docstring''' import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class UpperCAmelCase__ ( UpperCamelCase__ ): a : List[str] = (CMStochasticIterativeScheduler,) a : str = 1_0 def UpperCAmelCase_ ( self , **UpperCamelCase ) -> str: __lowerCAmelCase = { "num_train_timesteps": 201, "sigma_min": 0.0_02, "sigma_max": 80.0, } config.update(**UpperCamelCase ) return config def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase = 10 __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = self.scheduler_classes[0](**UpperCamelCase ) scheduler.set_timesteps(UpperCamelCase ) __lowerCAmelCase = scheduler.timesteps[0] __lowerCAmelCase = scheduler.timesteps[1] __lowerCAmelCase = self.dummy_sample __lowerCAmelCase = 0.1 * sample __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCAmelCase_ ( self ) -> Any: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=UpperCamelCase ) def UpperCAmelCase_ ( self ) -> List[str]: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = 1 scheduler.set_timesteps(UpperCamelCase ) __lowerCAmelCase = scheduler.timesteps __lowerCAmelCase = torch.manual_seed(0 ) __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(UpperCamelCase ): # 1. scale model input __lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase ) # 2. predict noise residual __lowerCAmelCase = model(UpperCamelCase , UpperCamelCase ) # 3. predict previous sample x_t-1 __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample __lowerCAmelCase = pred_prev_sample __lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) ) __lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2 assert abs(result_mean.item() - 0.25_10 ) < 1E-3 def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = [106, 0] scheduler.set_timesteps(timesteps=UpperCamelCase ) __lowerCAmelCase = scheduler.timesteps __lowerCAmelCase = torch.manual_seed(0 ) __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input __lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase ) # 2. predict noise residual __lowerCAmelCase = model(UpperCamelCase , UpperCamelCase ) # 3. 
predict previous sample x_t-1 __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample __lowerCAmelCase = pred_prev_sample __lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) ) __lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2 assert abs(result_mean.item() - 0.45_27 ) < 1E-3 def UpperCAmelCase_ ( self ) -> Any: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = [39, 30, 12, 15, 0] with self.assertRaises(UpperCamelCase , msg="`timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = [39, 30, 12, 1, 0] __lowerCAmelCase = len(UpperCamelCase ) with self.assertRaises(UpperCamelCase , msg="Can only pass one of `num_inference_steps` or `timesteps`." ): scheduler.set_timesteps(num_inference_steps=UpperCamelCase , timesteps=UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Tuple: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCamelCase , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ): scheduler.set_timesteps(timesteps=UpperCamelCase )
39
0
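Both full-loop tests above repeat the same three-step pattern: scale the model input, predict the residual, then step the scheduler. Factored out as a sketch (the model and scheduler arguments are placeholders for the dummy objects the test harness provides; this is not a definitive sampling implementation):

import torch

def denoise_loop(model, scheduler, sample, timesteps, seed=0):
    # The three-step pattern from the tests above: scale, predict, step.
    generator = torch.manual_seed(seed)
    for t in timesteps:
        scaled = scheduler.scale_model_input(sample, t)
        residual = model(scaled, t)
        sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
    return sample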
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : List[str] = logging.get_logger(__name__) lowerCAmelCase : Any = { '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''', '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''', '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''', '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''', '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''', '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''', } class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = """rwkv""" a : List[Any] = {"""max_position_embeddings""": """context_length"""} def __init__( self , UpperCamelCase=5_0277 , UpperCamelCase=1024 , UpperCamelCase=4096 , UpperCamelCase=32 , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=1E-5 , UpperCamelCase=0 , UpperCamelCase=0 , UpperCamelCase=6 , UpperCamelCase=False , UpperCamelCase=True , **UpperCamelCase , ) -> Tuple: __lowerCAmelCase = vocab_size __lowerCAmelCase = context_length __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size __lowerCAmelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size __lowerCAmelCase = layer_norm_epsilon __lowerCAmelCase = rescale_every __lowerCAmelCase = use_cache __lowerCAmelCase = bos_token_id __lowerCAmelCase = eos_token_id super().__init__( tie_word_embeddings=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
704
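Two fields in the RWKV config above are derived when not given explicitly: attention_hidden_size falls back to hidden_size, and intermediate_size to 4 * hidden_size. The fallback logic in isolation, with illustrative numbers:

def derived_sizes(hidden_size, attention_hidden_size=None, intermediate_size=None):
    # Mirrors the fallbacks in the config __init__ above.
    attention = attention_hidden_size if attention_hidden_size is not None else hidden_size
    intermediate = intermediate_size if intermediate_size is not None else 4 * hidden_size
    return attention, intermediate

assert derived_sizes(1024) == (1024, 4096)
assert derived_sizes(1024, attention_hidden_size=512) == (512, 4096)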
'''simple docstring''' import requests from bs4 import BeautifulSoup def world_covidaa_stats ( lowerCamelCase : str = "https://www.worldometers.info/coronavirus" ): '''simple docstring''' soup = BeautifulSoup(requests.get(lowerCamelCase ).text , "html.parser" ) keys = soup.findAll("h1" ) values = soup.findAll("div" , {"class": "maincounter-number"} ) keys += soup.findAll("span" , {"class": "panel-title"} ) values += soup.findAll("div" , {"class": "number-table-main"} ) return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )} if __name__ == "__main__": print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''') for key, value in world_covidaa_stats().items(): print(f'{key}\n{value}\n')
39
0
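The scraper above pairs headline nodes with counter nodes purely by position via zip. A hedged offline check of that pairing against a tiny hand-written snippet (the markup below is invented for the test, not worldometers' actual page; assumes bs4 is installed):

from bs4 import BeautifulSoup

html = """
<h1>Cases:</h1><div class="maincounter-number">1,000</div>
<h1>Deaths:</h1><div class="maincounter-number">10</div>
"""
soup = BeautifulSoup(html, "html.parser")
keys = soup.findAll("h1")
values = soup.findAll("div", {"class": "maincounter-number"})
stats = {k.text.strip(): v.text.strip() for k, v in zip(keys, values)}
assert stats == {"Cases:": "1,000", "Deaths:": "10"}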
'''simple docstring''' from queue import PriorityQueue from typing import Any import numpy as np def pass_and_relaxation ( lowerCamelCase : dict , lowerCamelCase : str , lowerCamelCase : set , lowerCamelCase : set , lowerCamelCase : dict , lowerCamelCase : dict , lowerCamelCase : PriorityQueue , lowerCamelCase : dict , lowerCamelCase : float | int , ): '''simple docstring''' for nxt, d in graph[v]: if nxt in visited_forward: continue __lowerCAmelCase = cst_fwd.get(lowerCamelCase , np.inf ) __lowerCAmelCase = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) __lowerCAmelCase = new_cost_f __lowerCAmelCase = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: __lowerCAmelCase = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : dict , lowerCamelCase : dict ): '''simple docstring''' __lowerCAmelCase = -1 __lowerCAmelCase = set() __lowerCAmelCase = set() __lowerCAmelCase = {source: 0} __lowerCAmelCase = {destination: 0} __lowerCAmelCase = {source: None} __lowerCAmelCase = {destination: None} __lowerCAmelCase = PriorityQueue() __lowerCAmelCase = PriorityQueue() __lowerCAmelCase = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): __lowerCAmelCase , __lowerCAmelCase = queue_forward.get() visited_forward.add(lowerCamelCase ) __lowerCAmelCase , __lowerCAmelCase = queue_backward.get() visited_backward.add(lowerCamelCase ) __lowerCAmelCase = pass_and_relaxation( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) __lowerCAmelCase = pass_and_relaxation( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: __lowerCAmelCase = shortest_distance return shortest_path_distance lowerCAmelCase : Optional[int] = { '''B''': [['''C''', 1]], '''C''': [['''D''', 1]], '''D''': [['''F''', 1]], '''E''': [['''B''', 1], ['''G''', 2]], '''F''': [], '''G''': [['''F''', 1]], } lowerCAmelCase : List[str] = { '''B''': [['''E''', 1]], '''C''': [['''B''', 1]], '''D''': [['''C''', 1]], '''F''': [['''D''', 1], ['''G''', 1]], '''E''': [[None, np.inf]], '''G''': [['''E''', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
705
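The bidirectional search above alternately pops the cheapest node from each frontier and stops once the two popped costs together can no longer beat the best meeting point found so far. On the adjacency lists defined at the bottom of the file, the shortest E-to-F distance is 3 (E -> G -> F, cost 2 + 1). A hedged usage sketch; bidirectional_dij is a hypothetical name for the mangled top-level function:

import numpy as np

graph_fwd = {
    "B": [["C", 1]], "C": [["D", 1]], "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]], "F": [], "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]], "C": [["B", 1]], "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]], "E": [[None, np.inf]], "G": [["E", 2]],
}
# Hypothetical call; the top-level function name is mangled in this dump:
# print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # expected: 3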
'''simple docstring''' from __future__ import annotations import math def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ): '''simple docstring''' if len(lowerCamelCase ) != 2 or len(a[0] ) != 2 or len(lowerCamelCase ) != 2 or len(b[0] ) != 2: raise Exception("Matrices are not 2x2" ) __lowerCAmelCase = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ): '''simple docstring''' return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(lowerCamelCase ) ) ] def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ): '''simple docstring''' return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(lowerCamelCase ) ) ] def __lowerCAmelCase ( lowerCamelCase : list ): '''simple docstring''' if len(lowerCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception("Odd matrices are not supported!" ) __lowerCAmelCase = len(lowerCamelCase ) __lowerCAmelCase = matrix_length // 2 __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase , lowerCamelCase )] for i in range(lowerCamelCase )] __lowerCAmelCase = [ [a[i][j] for j in range(lowerCamelCase , lowerCamelCase )] for i in range(lowerCamelCase , lowerCamelCase ) ] __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase )] for i in range(lowerCamelCase )] __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase )] for i in range(lowerCamelCase , lowerCamelCase )] return top_left, top_right, bot_left, bot_right def __lowerCAmelCase ( lowerCamelCase : list ): '''simple docstring''' return len(lowerCamelCase ), len(matrix[0] ) def __lowerCAmelCase ( lowerCamelCase : list ): '''simple docstring''' print("\n".join(str(lowerCamelCase ) for line in matrix ) ) def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ): '''simple docstring''' if matrix_dimensions(lowerCamelCase ) == (2, 2): return default_matrix_multiplication(lowerCamelCase , lowerCamelCase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase ) __lowerCAmelCase = actual_strassen(lowerCamelCase , matrix_subtraction(lowerCamelCase , lowerCamelCase ) ) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) __lowerCAmelCase = actual_strassen(lowerCamelCase , matrix_subtraction(lowerCamelCase , lowerCamelCase ) ) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) ) __lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) ) __lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) ) __lowerCAmelCase = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) , lowerCamelCase ) __lowerCAmelCase = matrix_addition(lowerCamelCase , lowerCamelCase ) __lowerCAmelCase = matrix_addition(lowerCamelCase , lowerCamelCase ) __lowerCAmelCase = 
matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) , lowerCamelCase ) # construct the new matrix from our 4 quadrants __lowerCAmelCase = [] for i in range(len(lowerCamelCase ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(lowerCamelCase ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ): '''simple docstring''' if matrix_dimensions(lowerCamelCase )[1] != matrix_dimensions(lowerCamelCase )[0]: __lowerCAmelCase = ( "Unable to multiply these matrices, please check the dimensions.\n" f'''Matrix A: {matrixa}\n''' f'''Matrix B: {matrixa}''' ) raise Exception(lowerCamelCase ) __lowerCAmelCase = matrix_dimensions(lowerCamelCase ) __lowerCAmelCase = matrix_dimensions(lowerCamelCase ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] __lowerCAmelCase = max(*lowerCamelCase , *lowerCamelCase ) __lowerCAmelCase = int(math.pow(2 , math.ceil(math.loga(lowerCamelCase ) ) ) ) __lowerCAmelCase = matrixa __lowerCAmelCase = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , lowerCamelCase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , lowerCamelCase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , lowerCamelCase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) __lowerCAmelCase = actual_strassen(lowerCamelCase , lowerCamelCase ) # Removing the additional zeros for i in range(0 , lowerCamelCase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , lowerCamelCase ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": lowerCAmelCase : Tuple = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] lowerCAmelCase : Any = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]] print(strassen(matrixa, matrixa))
39
0
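Strassen's trick above replaces the eight block products of naive divide-and-conquer with seven recursive products per split, at the cost of extra additions and subtractions. A reference O(n^3) product is a cheap way to sanity-check any result:

def naive_matmul(a, b):
    # Reference O(n^3) product to sanity-check a Strassen result against.
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]

a = [[1, 2], [3, 4]]
b = [[5, 6], [7, 8]]
assert naive_matmul(a, b) == [[19, 22], [43, 50]]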
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase : Union[str, Any] = { '''configuration_distilbert''': [ '''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DistilBertConfig''', '''DistilBertOnnxConfig''', ], '''tokenization_distilbert''': ['''DistilBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple = ['''DistilBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple = [ '''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DistilBertForMaskedLM''', '''DistilBertForMultipleChoice''', '''DistilBertForQuestionAnswering''', '''DistilBertForSequenceClassification''', '''DistilBertForTokenClassification''', '''DistilBertModel''', '''DistilBertPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[str] = [ '''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDistilBertForMaskedLM''', '''TFDistilBertForMultipleChoice''', '''TFDistilBertForQuestionAnswering''', '''TFDistilBertForSequenceClassification''', '''TFDistilBertForTokenClassification''', '''TFDistilBertMainLayer''', '''TFDistilBertModel''', '''TFDistilBertPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[int] = [ '''FlaxDistilBertForMaskedLM''', '''FlaxDistilBertForMultipleChoice''', '''FlaxDistilBertForQuestionAnswering''', '''FlaxDistilBertForSequenceClassification''', '''FlaxDistilBertForTokenClassification''', '''FlaxDistilBertModel''', '''FlaxDistilBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, 
FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
706
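The DistilBert __init__ above registers each backend's exports only if the corresponding availability check passes, using a try/except around a sentinel exception. The gating pattern on its own (a simplified sketch with a hand-rolled checker, not the transformers internals):

class OptionalDependencyNotAvailable(BaseException):
    pass

def is_torch_available():
    try:
        import torch  # noqa: F401
        return True
    except ImportError:
        return False

_import_structure = {"configuration": ["Config"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # backend missing: its exports are simply never registered
else:
    _import_structure["modeling"] = ["Model"]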
'''simple docstring''' import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput lowerCAmelCase : Optional[Any] = '''scheduler_config.json''' class UpperCAmelCase__ ( UpperCamelCase__ ): a : str = 1 a : Optional[int] = 2 a : int = 3 a : Union[str, Any] = 4 a : int = 5 a : Optional[int] = 6 a : str = 7 a : List[Any] = 8 a : List[str] = 9 a : List[str] = 1_0 a : int = 1_1 a : Any = 1_2 a : Any = 1_3 a : Tuple = 1_4 @dataclass class UpperCAmelCase__ ( UpperCamelCase__ ): a : torch.FloatTensor class UpperCAmelCase__ : a : Tuple = SCHEDULER_CONFIG_NAME a : Union[str, Any] = [] a : str = True @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase=False , **UpperCamelCase , ) -> int: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = cls.load_config( pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , ) return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False , **UpperCamelCase ) -> Dict: self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase ) @property def UpperCAmelCase_ ( self ) -> str: return self._get_compatibles() @classmethod def UpperCAmelCase_ ( cls ) -> Tuple: __lowerCAmelCase = list(set([cls.__name__] + cls._compatibles ) ) __lowerCAmelCase = importlib.import_module(__name__.split("." )[0] ) __lowerCAmelCase = [ getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase ) ] return compatible_classes
39
0
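_get_compatibles above resolves the _compatibles list of class-name strings into classes by importing the library's top-level module and calling getattr. The same name-to-class resolution in isolation (the module and class names in the comment are just examples):

import importlib

def resolve_names(module_name, class_names):
    # getattr-based lookup mirroring _get_compatibles above; missing names are skipped.
    module = importlib.import_module(module_name)
    return [getattr(module, name) for name in class_names if hasattr(module, name)]

# e.g. resolve_names("collections", ["OrderedDict", "NotAClass"]) -> [OrderedDict]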
import copy import importlib.metadata import json import os from dataclasses import dataclass from typing import Any, Dict, Union from packaging import version from ..utils import is_torch_available, logging if is_torch_available(): import torch lowerCAmelCase : str = logging.get_logger(__name__) @dataclass class UpperCAmelCase__ : def __init__( self , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=6.0 , UpperCamelCase=None , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=None , UpperCamelCase="fp4" , UpperCamelCase=False , **UpperCamelCase , ) -> Optional[Any]: __lowerCAmelCase = load_in_abit __lowerCAmelCase = load_in_abit __lowerCAmelCase = llm_inta_threshold __lowerCAmelCase = llm_inta_skip_modules __lowerCAmelCase = llm_inta_enable_fpaa_cpu_offload __lowerCAmelCase = llm_inta_has_fpaa_weight __lowerCAmelCase = bnb_abit_quant_type __lowerCAmelCase = bnb_abit_use_double_quant if bnb_abit_compute_dtype is None: __lowerCAmelCase = torch.floataa elif isinstance(UpperCamelCase , UpperCamelCase ): __lowerCAmelCase = getattr(UpperCamelCase , UpperCamelCase ) elif isinstance(UpperCamelCase , torch.dtype ): __lowerCAmelCase = bnb_abit_compute_dtype else: raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype" ) self.post_init() def UpperCAmelCase_ ( self ) -> Dict: if not isinstance(self.llm_inta_threshold , UpperCamelCase ): raise ValueError("llm_int8_threshold must be a float" ) if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , UpperCamelCase ): raise ValueError("llm_int8_skip_modules must be a list of strings" ) if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , UpperCamelCase ): raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean" ) if not isinstance(self.llm_inta_has_fpaa_weight , UpperCamelCase ): raise ValueError("llm_int8_has_fp16_weight must be a boolean" ) if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ): raise ValueError("bnb_4bit_compute_dtype must be torch.dtype" ) if not isinstance(self.bnb_abit_quant_type , UpperCamelCase ): raise ValueError("bnb_4bit_quant_type must be a string" ) if not isinstance(self.bnb_abit_use_double_quant , UpperCamelCase ): raise ValueError("bnb_4bit_use_double_quant must be a boolean" ) if self.load_in_abit and not version.parse(importlib.metadata.version("bitsandbytes" ) ) >= version.parse( "0.39.0" ): raise ValueError( "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version" ) def UpperCAmelCase_ ( self ) -> List[str]: return self.load_in_abit or self.load_in_abit def UpperCAmelCase_ ( self ) -> str: if self.load_in_abit: return "llm_int8" elif self.load_in_abit and self.bnb_abit_quant_type == "fp4": return "fp4" elif self.load_in_abit and self.bnb_abit_quant_type == "nf4": return "nf4" else: return None @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase , **UpperCamelCase ) -> Any: __lowerCAmelCase = cls(**UpperCamelCase ) __lowerCAmelCase = [] for key, value in kwargs.items(): if hasattr(UpperCamelCase , UpperCamelCase ): setattr(UpperCamelCase , UpperCamelCase , UpperCamelCase ) to_remove.append(UpperCamelCase ) for key in to_remove: kwargs.pop(UpperCamelCase , UpperCamelCase ) if return_unused_kwargs: return config, kwargs else: return config def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[Any]: with open(UpperCamelCase , "w" , encoding="utf-8" ) as writer: __lowerCAmelCase = self.to_dict() __lowerCAmelCase = 
json.dumps(UpperCamelCase , indent=2 , sort_keys=UpperCamelCase ) + "\n" writer.write(UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Dict[str, Any]: __lowerCAmelCase = copy.deepcopy(self.__dict__ ) __lowerCAmelCase = str(output["bnb_4bit_compute_dtype"] ).split("." )[1] return output def __repr__( self ) -> List[str]: return F'''{self.__class__.__name__} {self.to_json_string()}''' def UpperCAmelCase_ ( self , UpperCamelCase = True ) -> str: if use_diff is True: __lowerCAmelCase = self.to_diff_dict() else: __lowerCAmelCase = self.to_dict() return json.dumps(UpperCamelCase , indent=2 , sort_keys=UpperCamelCase ) + "\n" def UpperCAmelCase_ ( self ) -> Dict[str, Any]: __lowerCAmelCase = self.to_dict() # get the default config dict __lowerCAmelCase = BitsAndBytesConfig().to_dict() __lowerCAmelCase = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: __lowerCAmelCase = value return serializable_config_dict
707
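to_dict above stores the compute dtype as the short string after the dot ("float16" rather than "torch.float16"), and __init__ accepts either such a string or a torch.dtype. That round-trip in isolation (assumes torch is installed):

import torch

def dtype_to_str(dtype):
    # str(torch.float16) is "torch.float16"; keep the part after the dot,
    # matching how to_dict() above serializes bnb_4bit_compute_dtype.
    return str(dtype).split(".")[1]

def str_to_dtype(name):
    # Inverse lookup used by __init__ when a string is passed.
    return getattr(torch, name)

assert dtype_to_str(torch.float16) == "float16"
assert str_to_dtype("float16") is torch.float16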
'''simple docstring''' import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger lowerCAmelCase : List[Any] = get_logger(__name__) class UpperCAmelCase__ : def __init__( self , UpperCamelCase = None ) -> Union[str, Any]: __lowerCAmelCase = ( os.path.join(UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) __lowerCAmelCase = Extractor def UpperCAmelCase_ ( self , UpperCamelCase ) -> str: from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" __lowerCAmelCase = os.path.abspath(UpperCamelCase ) return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase ) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> bool: return force_extract or ( not os.path.isfile(UpperCamelCase ) and not (os.path.isdir(UpperCamelCase ) and os.listdir(UpperCamelCase )) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False ) -> str: __lowerCAmelCase = self.extractor.infer_extractor_format(UpperCamelCase ) if not extractor_format: return input_path __lowerCAmelCase = self._get_output_path(UpperCamelCase ) if self._do_extract(UpperCamelCase , UpperCamelCase ): self.extractor.extract(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return output_path class UpperCAmelCase__ ( UpperCamelCase__ ): @classmethod @abstractmethod def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool: ... @staticmethod @abstractmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: ... 
class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): a : List[bytes] = [] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> List[Any]: with open(UpperCamelCase , "rb" ) as f: return f.read(UpperCamelCase ) @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool: if not magic_number: __lowerCAmelCase = max(len(UpperCamelCase ) for cls_magic_number in cls.magic_numbers ) try: __lowerCAmelCase = cls.read_magic_number(UpperCamelCase , UpperCamelCase ) except OSError: return False return any(magic_number.startswith(UpperCamelCase ) for cls_magic_number in cls.magic_numbers ) class UpperCAmelCase__ ( UpperCamelCase__ ): @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool: return tarfile.is_tarfile(UpperCamelCase ) @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict: def resolved(UpperCamelCase ) -> str: return os.path.realpath(os.path.abspath(UpperCamelCase ) ) def badpath(UpperCamelCase , UpperCamelCase ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(UpperCamelCase , UpperCamelCase ) ).startswith(UpperCamelCase ) def badlink(UpperCamelCase , UpperCamelCase ) -> bool: # Links are interpreted relative to the directory containing the link __lowerCAmelCase = resolved(os.path.join(UpperCamelCase , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=UpperCamelCase ) __lowerCAmelCase = resolved(UpperCamelCase ) for finfo in members: if badpath(finfo.name , UpperCamelCase ): logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' ) elif finfo.issym() and badlink(UpperCamelCase , UpperCamelCase ): logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' ) elif finfo.islnk() and badlink(UpperCamelCase , UpperCamelCase ): logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' ) else: yield finfo @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) __lowerCAmelCase = tarfile.open(UpperCamelCase ) tar_file.extractall(UpperCamelCase , members=TarExtractor.safemembers(UpperCamelCase , UpperCamelCase ) ) tar_file.close() class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x1F\x8B"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: with gzip.open(UpperCamelCase , "rb" ) as gzip_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : List[Any] = [ B"""PK\x03\x04""", B"""PK\x05\x06""", # empty archive B"""PK\x07\x08""", # spanned archive ] @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool: if super().is_extractable(UpperCamelCase , magic_number=UpperCamelCase ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. 
# From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(UpperCamelCase , "rb" ) as fp: __lowerCAmelCase = _EndRecData(UpperCamelCase ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: __lowerCAmelCase = fp.read(UpperCamelCase ) # CD is where we expect it to be if len(UpperCamelCase ) == sizeCentralDir: __lowerCAmelCase = struct.unpack(UpperCamelCase , UpperCamelCase ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) with zipfile.ZipFile(UpperCamelCase , "r" ) as zip_file: zip_file.extractall(UpperCamelCase ) zip_file.close() class UpperCAmelCase__ ( UpperCamelCase__ ): a : Tuple = [B"""\xFD\x37\x7A\x58\x5A\x00"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: with lzma.open(UpperCamelCase ) as compressed_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : str = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.RARFILE_AVAILABLE: raise ImportError("Please pip install rarfile" ) import rarfile os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) __lowerCAmelCase = rarfile.RarFile(UpperCamelCase ) rf.extractall(UpperCamelCase ) rf.close() class UpperCAmelCase__ ( UpperCamelCase__ ): a : int = [B"""\x28\xb5\x2F\xFD"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.ZSTANDARD_AVAILABLE: raise ImportError("Please pip install zstandard" ) import zstandard as zstd __lowerCAmelCase = zstd.ZstdDecompressor() with open(UpperCamelCase , "rb" ) as ifh, open(UpperCamelCase , "wb" ) as ofh: dctx.copy_stream(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x42\x5A\x68"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: with bza.open(UpperCamelCase , "rb" ) as compressed_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x37\x7A\xBC\xAF\x27\x1C"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.PY7ZR_AVAILABLE: raise ImportError("Please pip install py7zr" ) import pyazr os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) with pyazr.SevenZipFile(UpperCamelCase , "r" ) as archive: archive.extractall(UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x04\x22\x4D\x18"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.LZ4_AVAILABLE: raise ImportError("Please pip 
install lz4" ) import lza.frame with lza.frame.open(UpperCamelCase , "rb" ) as compressed_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ : # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) a : Dict[str, Type[BaseExtractor]] = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def UpperCAmelCase_ ( cls ) -> Optional[Any]: return max( len(UpperCamelCase ) for extractor in cls.extractors.values() if issubclass(UpperCamelCase , UpperCamelCase ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict: try: return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase , magic_number_length=UpperCamelCase ) except OSError: return b"" @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = False ) -> bool: warnings.warn( "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. " "Use 'infer_extractor_format' instead." , category=UpperCamelCase , ) __lowerCAmelCase = cls.infer_extractor_format(UpperCamelCase ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase ) -> str: # <Added version="2.4.0"/> __lowerCAmelCase = cls._get_magic_number_max_length() __lowerCAmelCase = cls._read_magic_number(UpperCamelCase , UpperCamelCase ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(UpperCamelCase , magic_number=UpperCamelCase ): return extractor_format @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = "deprecated" , ) -> None: os.makedirs(os.path.dirname(UpperCamelCase ) , exist_ok=UpperCamelCase ) # Prevent parallel extractions __lowerCAmelCase = str(Path(UpperCamelCase ).with_suffix(".lock" ) ) with FileLock(UpperCamelCase ): shutil.rmtree(UpperCamelCase , ignore_errors=UpperCamelCase ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(UpperCamelCase , UpperCamelCase ): # passed as positional arg warnings.warn( "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. " "Use 'extractor_format' instead." , category=UpperCamelCase , ) __lowerCAmelCase = extractor if extractor != "deprecated" else extractor_format else: __lowerCAmelCase = cls.extractors[extractor_format] return extractor.extract(UpperCamelCase , UpperCamelCase ) else: warnings.warn( "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an " "exception in 3.0.0." , category=UpperCamelCase , ) for extractor in cls.extractors.values(): if extractor.is_extractable(UpperCamelCase ): return extractor.extract(UpperCamelCase , UpperCamelCase )
39
0
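Every extractor above advertises magic-number prefixes, and infer_extractor_format reads just enough leading bytes to prefix-match them. The sniffing step as a standalone sketch (signatures copied from the classes above):

MAGIC_NUMBERS = {
    "gzip": [b"\x1f\x8b"],
    "zip": [b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08"],
    "xz": [b"\xfd\x37\x7a\x58\x5a\x00"],
    "bz2": [b"\x42\x5a\x68"],
}

def infer_format(path):
    # Read enough leading bytes for the longest signature, then prefix-match,
    # mirroring infer_extractor_format above.
    max_len = max(len(m) for magics in MAGIC_NUMBERS.values() for m in magics)
    with open(path, "rb") as f:
        head = f.read(max_len)
    for fmt, magics in MAGIC_NUMBERS.items():
        if any(head.startswith(m) for m in magics):
            return fmt
    return None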
'''simple docstring''' import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets lowerCAmelCase : Tuple = datasets.logging.get_logger(__name__) lowerCAmelCase : List[str] = '''\ @InProceedings{moosavi2019minimum, author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube}, title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection}, year = {2019}, booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, publisher = {Association for Computational Linguistics}, address = {Florence, Italy}, } @inproceedings{10.3115/1072399.1072405, author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette}, title = {A Model-Theoretic Coreference Scoring Scheme}, year = {1995}, isbn = {1558604022}, publisher = {Association for Computational Linguistics}, address = {USA}, url = {https://doi.org/10.3115/1072399.1072405}, doi = {10.3115/1072399.1072405}, booktitle = {Proceedings of the 6th Conference on Message Understanding}, pages = {45–52}, numpages = {8}, location = {Columbia, Maryland}, series = {MUC6 ’95} } @INPROCEEDINGS{Bagga98algorithmsfor, author = {Amit Bagga and Breck Baldwin}, title = {Algorithms for Scoring Coreference Chains}, booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference}, year = {1998}, pages = {563--566} } @INPROCEEDINGS{Luo05oncoreference, author = {Xiaoqiang Luo}, title = {On coreference resolution performance metrics}, booktitle = {In Proc. of HLT/EMNLP}, year = {2005}, pages = {25--32}, publisher = {URL} } @inproceedings{moosavi-strube-2016-coreference, title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric", author = "Moosavi, Nafise Sadat and Strube, Michael", booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)", month = aug, year = "2016", address = "Berlin, Germany", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/P16-1060", doi = "10.18653/v1/P16-1060", pages = "632--642", } ''' lowerCAmelCase : Union[str, Any] = '''\ CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which implements of the common evaluation metrics including MUC [Vilain et al, 1995], B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005], LEA [Moosavi and Strube, 2016] and the averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) [Denis and Baldridge, 2009a; Pradhan et al., 2011]. This wrapper of CoVal currently only work with CoNLL line format: The CoNLL format has one word per line with all the annotation for this word in column separated by spaces: Column Type Description 1 Document ID This is a variation on the document filename 2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc. 3 Word number 4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release. 5 Part-of-Speech 6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. 
The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column. 7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-" 8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7. 9 Word sense This is the word sense of the word in Column 3. 10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data. 11 Named Entities These columns identify the spans representing various named entities. 12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7. N Coreference Coreference chain information encoded in a parenthesis structure. More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md CoVal code was written by @ns-moosavi. Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py The test suite is taken from https://github.com/conll/reference-coreference-scorers/ Mention evaluation and the test suite are added by @andreasvc. Parsing CoNLL files is developed by Leo Born. ''' lowerCAmelCase : List[Any] = ''' Calculates coreference evaluation metrics. Args: predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format. Each prediction is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Parse and coreference annotation) See the details on the format in the description of the metric. references: list of sentences. Each sentence is a list of word references to score in the CoNLL format. Each reference is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Parse and coreference annotation) See the details on the format in the description of the metric. keep_singletons: After extracting all mentions of key or system files, mentions whose corresponding coreference chain is of size one, are considered as singletons. The default evaluation mode will include singletons in evaluations if they are included in the key or the system files. By setting \'keep_singletons=False\', all singletons in the key and system files will be excluded from the evaluation. NP_only: Most of the recent coreference resolvers only resolve NP mentions and leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs. min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans. Minimum spans are determined using the MINA algorithm. Returns: \'mentions\': mentions \'muc\': MUC metric [Vilain et al, 1995] \'bcub\': B-cubed [Bagga and Baldwin, 1998] \'ceafe\': CEAFe [Luo et al., 2005] \'lea\': LEA [Moosavi and Strube, 2016] \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) Examples: >>> coval = datasets.load_metric(\'coval\') >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\', ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\', ...
\'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\', ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\', ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\', ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\'] >>> references = [words] >>> predictions = [words] >>> results = coval.compute(predictions=predictions, references=references) >>> print(results) # doctest:+ELLIPSIS {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0} ''' def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=False , lowerCamelCase : Dict=False , lowerCamelCase : int=True , lowerCamelCase : str=False , lowerCamelCase : Tuple="dummy_doc" ): '''simple docstring''' __lowerCAmelCase = {doc: key_lines} __lowerCAmelCase = {doc: sys_lines} __lowerCAmelCase = {} __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase , __lowerCAmelCase = reader.get_doc_mentions(lowerCamelCase , key_doc_lines[doc] , lowerCamelCase ) key_singletons_num += singletons_num if NP_only or min_span: __lowerCAmelCase = reader.set_annotated_parse_trees(lowerCamelCase , key_doc_lines[doc] , lowerCamelCase , lowerCamelCase ) __lowerCAmelCase , __lowerCAmelCase = reader.get_doc_mentions(lowerCamelCase , sys_doc_lines[doc] , lowerCamelCase ) sys_singletons_num += singletons_num if NP_only or min_span: __lowerCAmelCase = reader.set_annotated_parse_trees(lowerCamelCase , key_doc_lines[doc] , lowerCamelCase , lowerCamelCase ) if remove_nested: __lowerCAmelCase , __lowerCAmelCase = reader.remove_nested_coref_mentions(lowerCamelCase , lowerCamelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters __lowerCAmelCase , __lowerCAmelCase = reader.remove_nested_coref_mentions(lowerCamelCase , lowerCamelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters __lowerCAmelCase = reader.get_mention_assignments(lowerCamelCase , lowerCamelCase ) __lowerCAmelCase = reader.get_mention_assignments(lowerCamelCase , lowerCamelCase ) __lowerCAmelCase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( "Number of removed nested coreferring mentions in the key " f'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' ) logger.info( "Number of resulting singleton clusters in the key " f'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' ) if not keep_singletons: logger.info( f'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' "files, respectively" ) return doc_coref_infos def __lowerCAmelCase ( lowerCamelCase : List[str] , lowerCamelCase : int , lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : str , lowerCamelCase : int ): '''simple docstring''' __lowerCAmelCase = get_coref_infos(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) __lowerCAmelCase = {} __lowerCAmelCase = 0 __lowerCAmelCase = 0 for name, metric in metrics: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = evaluator.evaluate_documents(lowerCamelCase , lowerCamelCase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f'''{name}/recall''': recall, 
f'''{name}/precision''': precision, f'''{name}/f1''': fa} ) logger.info( name.ljust(10 ) , f'''Recall: {recall * 1_00:.2f}''' , f''' Precision: {precision * 1_00:.2f}''' , f''' F1: {fa * 1_00:.2f}''' , ) if conll_subparts_num == 3: __lowerCAmelCase = (conll / 3) * 1_00 logger.info(f'''CoNLL score: {conll:.2f}''' ) output_scores.update({"conll_score": conll} ) return output_scores def __lowerCAmelCase ( lowerCamelCase : List[Any] ): '''simple docstring''' __lowerCAmelCase = False for line in key_lines: if not line.startswith("#" ): if len(line.split() ) > 6: __lowerCAmelCase = line.split()[5] if not parse_col == "-": __lowerCAmelCase = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase__ ( datasets.Metric ): def UpperCAmelCase_ ( self ) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" ) ), "references": datasets.Sequence(datasets.Value("string" ) ), } ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[ "https://github.com/ns-moosavi/coval", "https://www.aclweb.org/anthology/P16-1060", "http://www.conll.cemantix.org/2012/data.html", ] , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=False ) -> Union[str, Any]: __lowerCAmelCase = [ ("mentions", evaluator.mentions), ("muc", evaluator.muc), ("bcub", evaluator.b_cubed), ("ceafe", evaluator.ceafe), ("lea", evaluator.lea), ] if min_span: __lowerCAmelCase = util.check_gold_parse_annotation(UpperCamelCase ) if not has_gold_parse: raise NotImplementedError("References should have gold parse annotation to use 'min_span'." ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" __lowerCAmelCase = evaluate( key_lines=UpperCamelCase , sys_lines=UpperCamelCase , metrics=UpperCamelCase , NP_only=UpperCamelCase , remove_nested=UpperCamelCase , keep_singletons=UpperCamelCase , min_span=UpperCamelCase , ) return score
708
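evaluate above accumulates F1 over MUC, B-cubed and CEAFe and reports their mean times 100 as the CoNLL score. The arithmetic by itself, with made-up recall/precision pairs:

def f1(recall, precision):
    # Harmonic mean; guards the zero-denominator case.
    return 0.0 if recall + precision == 0 else 2 * recall * precision / (recall + precision)

# Hypothetical per-metric (recall, precision) pairs for muc, bcub, ceafe:
pairs = [(0.8, 0.9), (0.7, 0.75), (0.6, 0.8)]
conll_score = sum(f1(r, p) for r, p in pairs) / 3 * 100
print(f"CoNLL score: {conll_score:.2f}")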
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class UpperCAmelCase__ ( UpperCamelCase__ ): def __init__( self ) -> List[str]: # test for the above condition self.test() def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = 0 __lowerCAmelCase = False while not completed: if counter == 1: self.reset() __lowerCAmelCase = self.advance() if not self.does_advance(UpperCamelCase ): raise Exception( "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.update(UpperCamelCase ) counter += 1 if counter > 1_0000: raise Exception("update() does not fulfill the constraint." ) if self.remaining() != 0: raise Exception("Custom Constraint is not defined correctly." ) @abstractmethod def UpperCAmelCase_ ( self ) -> Dict: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self ) -> int: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self ) -> int: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> str: raise NotImplementedError( F'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class UpperCAmelCase__ ( UpperCamelCase__ ): def __init__( self , UpperCamelCase ) -> Dict: super(UpperCamelCase , self ).__init__() if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(UpperCamelCase , UpperCamelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) __lowerCAmelCase = token_ids __lowerCAmelCase = len(self.token_ids ) __lowerCAmelCase = -1 # the index of the currently fulfilled step __lowerCAmelCase = False def UpperCAmelCase_ ( self ) -> Optional[int]: if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def UpperCAmelCase_ ( self , UpperCamelCase ) -> str: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def UpperCAmelCase_ ( self , UpperCamelCase ) -> int: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False if self.does_advance(UpperCamelCase ): self.fulfilled_idx += 1 __lowerCAmelCase = True if self.fulfilled_idx == (self.seqlen - 1): __lowerCAmelCase = True __lowerCAmelCase = completed else: # failed to make progress. __lowerCAmelCase = True self.reset() return stepped, completed, reset def UpperCAmelCase_ ( self ) -> Union[str, Any]: __lowerCAmelCase = False __lowerCAmelCase = 0 def UpperCAmelCase_ ( self ) -> Optional[int]: return self.seqlen - (self.fulfilled_idx + 1) def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Optional[Any]: __lowerCAmelCase = PhrasalConstraint(self.token_ids ) if stateful: __lowerCAmelCase = self.seqlen __lowerCAmelCase = self.fulfilled_idx __lowerCAmelCase = self.completed return new_constraint class UpperCAmelCase__ : def __init__( self , UpperCamelCase , UpperCamelCase=True ) -> Optional[int]: __lowerCAmelCase = max([len(UpperCamelCase ) for one in nested_token_ids] ) __lowerCAmelCase = {} for token_ids in nested_token_ids: __lowerCAmelCase = root for tidx, token_id in enumerate(UpperCamelCase ): if token_id not in level: __lowerCAmelCase = {} __lowerCAmelCase = level[token_id] if no_subsets and self.has_subsets(UpperCamelCase , UpperCamelCase ): raise ValueError( "Each list in `nested_token_ids` can't be a complete subset of another list, but is" F''' {nested_token_ids}.''' ) __lowerCAmelCase = root def UpperCAmelCase_ ( self , UpperCamelCase ) -> int: __lowerCAmelCase = self.trie for current_token in current_seq: __lowerCAmelCase = start[current_token] __lowerCAmelCase = list(start.keys() ) return next_tokens def UpperCAmelCase_ ( self , UpperCamelCase ) -> str: __lowerCAmelCase = self.next_tokens(UpperCamelCase ) return len(UpperCamelCase ) == 0 def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]: __lowerCAmelCase = list(root.values() ) if len(UpperCamelCase ) == 0: return 1 else: return sum([self.count_leaves(UpperCamelCase ) for nn in next_nodes] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> Optional[Any]: __lowerCAmelCase = self.count_leaves(UpperCamelCase ) return len(UpperCamelCase ) != leaf_count 
class UpperCAmelCase__ ( UpperCamelCase__ ): def __init__( self , UpperCamelCase ) -> List[Any]: super(UpperCamelCase , self ).__init__() if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(UpperCamelCase , UpperCamelCase ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(UpperCamelCase , UpperCamelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) __lowerCAmelCase = DisjunctiveTrie(UpperCamelCase ) __lowerCAmelCase = nested_token_ids __lowerCAmelCase = self.trie.max_height __lowerCAmelCase = [] __lowerCAmelCase = False def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase = self.trie.next_tokens(self.current_seq ) if len(UpperCamelCase ) == 0: return None else: return token_list def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[str]: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' ) __lowerCAmelCase = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def UpperCAmelCase_ ( self , UpperCamelCase ) -> int: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False if self.does_advance(UpperCamelCase ): self.current_seq.append(UpperCamelCase ) __lowerCAmelCase = True else: __lowerCAmelCase = True self.reset() __lowerCAmelCase = self.trie.reached_leaf(self.current_seq ) __lowerCAmelCase = completed return stepped, completed, reset def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = False __lowerCAmelCase = [] def UpperCAmelCase_ ( self ) -> int: if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Union[str, Any]: __lowerCAmelCase = DisjunctiveConstraint(self.token_ids ) if stateful: __lowerCAmelCase = self.seqlen __lowerCAmelCase = self.current_seq __lowerCAmelCase = self.completed return new_constraint class UpperCAmelCase__ : def __init__( self , UpperCamelCase ) -> Union[str, Any]: __lowerCAmelCase = constraints # max # of steps required to fulfill a given constraint __lowerCAmelCase = max([c.seqlen for c in constraints] ) __lowerCAmelCase = len(UpperCamelCase ) __lowerCAmelCase = False self.init_state() def UpperCAmelCase_ ( self ) -> Union[str, Any]: __lowerCAmelCase = [] __lowerCAmelCase = None __lowerCAmelCase = [constraint.copy(stateful=UpperCamelCase ) for constraint in self.constraints] def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def UpperCAmelCase_ ( self ) -> List[str]: __lowerCAmelCase = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" __lowerCAmelCase = constraint.advance() if 
isinstance(UpperCamelCase , UpperCamelCase ): token_list.append(UpperCamelCase ) elif isinstance(UpperCamelCase , UpperCamelCase ): token_list.extend(UpperCamelCase ) else: __lowerCAmelCase = self.inprogress_constraint.advance() if isinstance(UpperCamelCase , UpperCamelCase ): token_list.append(UpperCamelCase ) elif isinstance(UpperCamelCase , UpperCamelCase ): token_list.extend(UpperCamelCase ) if len(UpperCamelCase ) == 0: return None else: return token_list def UpperCAmelCase_ ( self , UpperCamelCase ) -> int: self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint __lowerCAmelCase , __lowerCAmelCase = self.add(UpperCamelCase ) # the entire list of constraints is fulfilled if self.completed: break def UpperCAmelCase_ ( self , UpperCamelCase ) -> Dict: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) __lowerCAmelCase , __lowerCAmelCase = False, False if self.completed: __lowerCAmelCase = True __lowerCAmelCase = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the current # job, simply update the state __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.inprogress_constraint.update(UpperCamelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCamelCase ) ) __lowerCAmelCase = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) __lowerCAmelCase = None if len(self.pending_constraints ) == 0: # we're done! __lowerCAmelCase = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(UpperCamelCase ): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = pending_constraint.update(UpperCamelCase ) if not stepped: raise Exception( "`constraint.update(token_id)` is not yielding incremental progress, " "even though `constraint.does_advance(token_id)` is true." ) if complete: self.complete_constraints.append(UpperCamelCase ) __lowerCAmelCase = None if not complete and stepped: __lowerCAmelCase = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". __lowerCAmelCase = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. __lowerCAmelCase = True break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped def UpperCAmelCase_ ( self , UpperCamelCase=True ) -> str: __lowerCAmelCase = ConstraintListState(self.constraints ) # we actually never touch self.constraints objects # throughout this process. So it's at initialization state. if stateful: __lowerCAmelCase = [ constraint.copy(stateful=UpperCamelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: __lowerCAmelCase = self.inprogress_constraint.copy(stateful=UpperCamelCase ) __lowerCAmelCase = [constraint.copy() for constraint in self.pending_constraints] return new_state
39
0
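The constraint machinery in the sample above mirrors the public beam-search constraint API in transformers; the style transform collapses the method names, but the defs appear to correspond to advance/does_advance/update/reset/remaining/copy. A minimal sketch of the intended token-level behavior, using the exported classes and invented token ids:

from transformers import DisjunctiveConstraint, PhrasalConstraint

# Force the exact phrase [5, 6, 7] to appear in a generated sequence.
phrase = PhrasalConstraint([5, 6, 7])
print(phrase.advance())                      # 5 -- the next token that makes progress
stepped, completed, reset = phrase.update(5)
print(stepped, completed, reset)             # True False False
phrase.update(6)
stepped, completed, reset = phrase.update(7)
print(completed)                             # True -- the whole phrase was matched

# Accept any one of several token sequences; a trie tracks the open branches.
either = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
either.update(1)
print(either.advance())                      # [2, 4] -- both branches are still open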
'''simple docstring''' import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] ): '''simple docstring''' assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[Any] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = TextDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read() _check_text_dataset(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"text": "string"}, {"text": "int32"}, {"text": "float32"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : List[str] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"text": "string"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = TextDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_text_dataset(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"text": "string"} __lowerCAmelCase = TextDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , split=lowerCamelCase ).read() _check_text_dataset(lowerCamelCase , lowerCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : List[Any] , lowerCamelCase : Dict ): '''simple docstring''' if issubclass(lowerCamelCase , lowerCamelCase ): __lowerCAmelCase = text_path elif issubclass(lowerCamelCase , lowerCamelCase ): __lowerCAmelCase = [text_path] __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"text": "string"} __lowerCAmelCase = TextDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_text_dataset(lowerCamelCase , lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any]=("train",) ): '''simple docstring''' assert isinstance(lowerCamelCase , lowerCamelCase ) for split in splits: __lowerCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : 
List[Any] , lowerCamelCase : Dict ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = TextDatasetReader({"train": text_path} , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read() _check_text_datasetdict(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"text": "string"}, {"text": "int32"}, {"text": "float32"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" __lowerCAmelCase = {"text": "string"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = TextDatasetReader({"train": text_path} , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_text_datasetdict(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple ): '''simple docstring''' if split: __lowerCAmelCase = {split: text_path} else: __lowerCAmelCase = "train" __lowerCAmelCase = {"train": text_path, "test": text_path} __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"text": "string"} __lowerCAmelCase = TextDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_text_datasetdict(lowerCamelCase , lowerCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
709
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ): a : List[Any] = KandinskyImgaImgPipeline a : Union[str, Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""] a : List[Any] = [ """prompt""", """negative_prompt""", """image_embeds""", """negative_image_embeds""", """image""", ] a : Any = [ """generator""", """height""", """width""", """strength""", """guidance_scale""", """negative_prompt""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] a : Union[str, Any] = False @property def UpperCAmelCase_ ( self ) -> int: return 32 @property def UpperCAmelCase_ ( self ) -> List[str]: return 32 @property def UpperCAmelCase_ ( self ) -> Dict: return self.time_input_dim @property def UpperCAmelCase_ ( self ) -> int: return self.time_input_dim * 4 @property def UpperCAmelCase_ ( self ) -> int: return 100 @property def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def UpperCAmelCase_ ( self ) -> Union[str, Any]: torch.manual_seed(0 ) __lowerCAmelCase = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) __lowerCAmelCase = MultilingualCLIP(UpperCamelCase ) __lowerCAmelCase = text_encoder.eval() return text_encoder @property def UpperCAmelCase_ ( self ) -> List[str]: torch.manual_seed(0 ) __lowerCAmelCase = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } __lowerCAmelCase = UNetaDConditionModel(**UpperCamelCase ) return model @property def UpperCAmelCase_ ( self ) -> List[Any]: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def UpperCAmelCase_ ( self ) -> Dict: torch.manual_seed(0 ) 
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def UpperCAmelCase_ ( self ) -> Any: __lowerCAmelCase = self.dummy_text_encoder __lowerCAmelCase = self.dummy_tokenizer __lowerCAmelCase = self.dummy_unet __lowerCAmelCase = self.dummy_movq __lowerCAmelCase = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_00_85, "beta_end": 0.0_12, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } __lowerCAmelCase = DDIMScheduler(**UpperCamelCase ) __lowerCAmelCase = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=0 ) -> Optional[Any]: __lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) __lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase ) # create init_image __lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) __lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCAmelCase = Image.fromarray(np.uinta(UpperCamelCase ) ).convert("RGB" ).resize((256, 256) ) if str(UpperCamelCase ).startswith("mps" ): __lowerCAmelCase = torch.manual_seed(UpperCamelCase ) else: __lowerCAmelCase = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) __lowerCAmelCase = { "prompt": "horse", "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def UpperCAmelCase_ ( self ) -> Tuple: __lowerCAmelCase = "cpu" __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = self.pipeline_class(**UpperCamelCase ) __lowerCAmelCase = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) __lowerCAmelCase = pipe(**self.get_dummy_inputs(UpperCamelCase ) ) __lowerCAmelCase = output.images __lowerCAmelCase = pipe( **self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0] __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = np.array( [0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class UpperCAmelCase__ ( unittest.TestCase ): def UpperCAmelCase_ ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_img2img_frog.npy" ) __lowerCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) __lowerCAmelCase = "A red cartoon frog, 4k" __lowerCAmelCase = KandinskyPriorPipeline.from_pretrained( 
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(UpperCamelCase ) __lowerCAmelCase = KandinskyImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa ) __lowerCAmelCase = pipeline.to(UpperCamelCase ) pipeline.set_progress_bar_config(disable=UpperCamelCase ) __lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowerCAmelCase , __lowerCAmelCase = pipe_prior( UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() __lowerCAmelCase = pipeline( UpperCamelCase , image=UpperCamelCase , image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , ) __lowerCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
39
0
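The TextDatasetReader tests above all assert the same contract: every line of the input file becomes one row of a single "text" column with dtype "string" unless features override it. A self-contained sketch through the public load_dataset entry point (the file name is invented):

from datasets import load_dataset

with open("sample.txt", "w") as f:
    f.write("first line\nsecond line\nthird line\nfourth line\n")

ds = load_dataset("text", data_files={"train": "sample.txt"}, split="train")
print(ds.num_rows)                # 4, matching the fixture the tests rely on
print(ds.column_names)            # ['text']
print(ds.features["text"].dtype)  # 'string' -- the default dtype asserted above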
'''simple docstring''' lowerCAmelCase : Any = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ''' def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = input("Enter message: " ) __lowerCAmelCase = input("Enter key [alphanumeric]: " ) __lowerCAmelCase = input("Encrypt/Decrypt [e/d]: " ) if mode.lower().startswith("e" ): __lowerCAmelCase = "encrypt" __lowerCAmelCase = encrypt_message(lowerCamelCase , lowerCamelCase ) elif mode.lower().startswith("d" ): __lowerCAmelCase = "decrypt" __lowerCAmelCase = decrypt_message(lowerCamelCase , lowerCamelCase ) print(f'''\n{mode.title()}ed message:''' ) print(lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : str ): '''simple docstring''' return translate_message(lowerCamelCase , lowerCamelCase , "encrypt" ) def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : str ): '''simple docstring''' return translate_message(lowerCamelCase , lowerCamelCase , "decrypt" ) def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : str ): '''simple docstring''' __lowerCAmelCase = [] __lowerCAmelCase = 0 __lowerCAmelCase = key.upper() for symbol in message: __lowerCAmelCase = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(lowerCamelCase ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(lowerCamelCase ): __lowerCAmelCase = 0 else: translated.append(lowerCamelCase ) return "".join(lowerCamelCase ) if __name__ == "__main__": main()
710
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') lowerCAmelCase : Any = logging.getLogger(__name__) @dataclass class UpperCAmelCase__ : a : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a : bool = field( default=UpperCamelCase__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) a : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) a : bool = field( default=UpperCamelCase__ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) @dataclass class UpperCAmelCase__ : a : Optional[str] = field(default=UpperCamelCase__ , metadata={"""help""": """The input training data file (a text file)."""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) a : bool = field( default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) a : Optional[int] = field( default=UpperCamelCase__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) a : Optional[int] = field( default=UpperCamelCase__ , metadata={ """help""": ( """The maximum total input sequence length after tokenization. If passed, sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a : bool = field( default=UpperCamelCase__ , metadata={ """help""": ( """Whether to pad all samples to the maximum sentence length. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch. 
More """ """efficient on GPU but very bad for TPU.""" ) } , ) a : Optional[int] = field( default=UpperCamelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) a : Optional[int] = field( default=UpperCamelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def UpperCAmelCase_ ( self ) -> Tuple: if self.train_file is not None: __lowerCAmelCase = self.train_file.split("." )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: __lowerCAmelCase = self.validation_file.split("." )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class UpperCAmelCase__ : a : PreTrainedTokenizerBase a : Union[bool, str, PaddingStrategy] = True a : Optional[int] = None a : Optional[int] = None def __call__( self , UpperCamelCase ) -> Optional[int]: __lowerCAmelCase = "label" if "label" in features[0].keys() else "labels" __lowerCAmelCase = [feature.pop(UpperCamelCase ) for feature in features] __lowerCAmelCase = len(UpperCamelCase ) __lowerCAmelCase = len(features[0]["input_ids"] ) __lowerCAmelCase = [ [{k: v[i] for k, v in feature.items()} for i in range(UpperCamelCase )] for feature in features ] __lowerCAmelCase = list(chain(*UpperCamelCase ) ) __lowerCAmelCase = self.tokenizer.pad( UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten __lowerCAmelCase = {k: v.view(UpperCamelCase , UpperCamelCase , -1 ) for k, v in batch.items()} # Add back labels __lowerCAmelCase = torch.tensor(UpperCamelCase , dtype=torch.intaa ) return batch def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag" , lowerCamelCase , lowerCamelCase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() __lowerCAmelCase = training_args.get_process_log_level() logger.setLevel(lowerCamelCase ) datasets.utils.logging.set_verbosity(lowerCamelCase ) transformers.utils.logging.set_verbosity(lowerCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __lowerCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: __lowerCAmelCase = {} if data_args.train_file is not None: __lowerCAmelCase = data_args.train_file if data_args.validation_file is not None: __lowerCAmelCase = data_args.validation_file __lowerCAmelCase = data_args.train_file.split("." )[-1] __lowerCAmelCase = load_dataset( lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. __lowerCAmelCase = load_dataset( "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCAmelCase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. __lowerCAmelCase = [f'''ending{i}''' for i in range(4 )] __lowerCAmelCase = "sent1" __lowerCAmelCase = "sent2" if data_args.max_seq_length is None: __lowerCAmelCase = tokenizer.model_max_length if max_seq_length > 10_24: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) __lowerCAmelCase = 10_24 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) __lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(lowerCamelCase : Tuple ): __lowerCAmelCase = [[context] * 4 for context in examples[context_name]] __lowerCAmelCase = examples[question_header_name] __lowerCAmelCase = [ [f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase ) ] # Flatten out __lowerCAmelCase = list(chain(*lowerCamelCase ) ) __lowerCAmelCase = list(chain(*lowerCamelCase ) ) # Tokenize __lowerCAmelCase = tokenizer( lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="max_length" if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) __lowerCAmelCase = raw_datasets["train"] if data_args.max_train_samples is not None: __lowerCAmelCase = min(len(lowerCamelCase ) , data_args.max_train_samples ) __lowerCAmelCase = train_dataset.select(range(lowerCamelCase ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): __lowerCAmelCase = train_dataset.map( lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) __lowerCAmelCase = raw_datasets["validation"] if data_args.max_eval_samples is not None: __lowerCAmelCase = min(len(lowerCamelCase ) , data_args.max_eval_samples ) __lowerCAmelCase = eval_dataset.select(range(lowerCamelCase ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): __lowerCAmelCase = eval_dataset.map( lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator __lowerCAmelCase = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(lowerCamelCase : Dict ): __lowerCAmelCase , __lowerCAmelCase = eval_predictions __lowerCAmelCase = np.argmax(lowerCamelCase , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer __lowerCAmelCase = Trainer( model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , ) # Training if training_args.do_train: __lowerCAmelCase = None if training_args.resume_from_checkpoint is not None: __lowerCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: __lowerCAmelCase = last_checkpoint __lowerCAmelCase = trainer.train(resume_from_checkpoint=lowerCamelCase ) trainer.save_model() # Saves the tokenizer too for easy upload __lowerCAmelCase = train_result.metrics __lowerCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase ) ) __lowerCAmelCase = min(lowerCamelCase , len(lowerCamelCase ) ) trainer.log_metrics("train" , lowerCamelCase ) trainer.save_metrics("train" , lowerCamelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) __lowerCAmelCase = 
trainer.evaluate() __lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase ) __lowerCAmelCase = min(lowerCamelCase , len(lowerCamelCase ) ) trainer.log_metrics("eval" , lowerCamelCase ) trainer.save_metrics("eval" , lowerCamelCase ) __lowerCAmelCase = { "finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice", "dataset_tags": "swag", "dataset_args": "regular", "dataset": "SWAG", "language": "en", } if training_args.push_to_hub: trainer.push_to_hub(**lowerCamelCase ) else: trainer.create_model_card(**lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : Tuple ): '''simple docstring''' main() if __name__ == "__main__": main()
39
0
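The Vigenère sample above is not runnable as written because the style transform collapses every function name, so here is a self-contained restatement of its translate loop plus a round trip; the message and key are invented:

LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

def translate(message: str, key: str, mode: str) -> str:
    out, key_index, key = [], 0, key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            shift = LETTERS.find(key[key_index])
            num = (num + shift) % 26 if mode == "encrypt" else (num - shift) % 26
            out.append(LETTERS[num] if symbol.isupper() else LETTERS[num].lower())
            key_index = (key_index + 1) % len(key)  # the key advances only on letters
        else:
            out.append(symbol)  # non-letters pass through unchanged
    return "".join(out)

cipher = translate("Attack at dawn!", "LEMON", "encrypt")
print(cipher)                                 # 'Lxfopv ef rnhr!'
print(translate(cipher, "LEMON", "decrypt"))  # 'Attack at dawn!'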
'''simple docstring''' import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def __lowerCAmelCase ( lowerCamelCase : int = 3 ): '''simple docstring''' if isinstance(lowerCamelCase , str ): raise TypeError("number of qubits must be an integer." ) if number_of_qubits <= 0: raise ValueError("number of qubits must be > 0." ) if math.floor(lowerCamelCase ) != number_of_qubits: raise ValueError("number of qubits must be an exact integer." ) if number_of_qubits > 10: raise ValueError("number of qubits too large to simulate (>10)." ) __lowerCAmelCase = QuantumRegister(lowerCamelCase , "qr" ) __lowerCAmelCase = ClassicalRegister(lowerCamelCase , "cr" ) __lowerCAmelCase = QuantumCircuit(lowerCamelCase , lowerCamelCase ) __lowerCAmelCase = number_of_qubits for i in range(lowerCamelCase ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(lowerCamelCase ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , lowerCamelCase , lowerCamelCase ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(lowerCamelCase , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(lowerCamelCase , lowerCamelCase ) # simulate with 10000 shots __lowerCAmelCase = Aer.get_backend("qasm_simulator" ) __lowerCAmelCase = execute(lowerCamelCase , lowerCamelCase , shots=1_00_00 ) return job.result().get_counts(lowerCamelCase ) if __name__ == "__main__": print( f'Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}' )
711
'''simple docstring''' # flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter lowerCAmelCase : List[str] = logging.get_logger(__name__) lowerCAmelCase : Dict[Optional[str], Type[Formatter]] = {} lowerCAmelCase : Dict[Optional[str], str] = {} lowerCAmelCase : Dict[Optional[str], Exception] = {} def __lowerCAmelCase ( lowerCamelCase : type , lowerCamelCase : Optional[str] , lowerCamelCase : Optional[List[str]] = None , ): '''simple docstring''' __lowerCAmelCase = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' ) __lowerCAmelCase = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' ) __lowerCAmelCase = format_type def __lowerCAmelCase ( lowerCamelCase : Exception , lowerCamelCase : Optional[str] , lowerCamelCase : Optional[List[str]] = None ): '''simple docstring''' __lowerCAmelCase = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): __lowerCAmelCase = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['''python''']) _register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow''']) _register_formatter(NumpyFormatter, '''numpy''', aliases=['''np''']) _register_formatter(PandasFormatter, '''pandas''', aliases=['''pd''']) _register_formatter(CustomFormatter, '''custom''') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch''']) else: lowerCAmelCase : Optional[int] = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''') _register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch''']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf''']) else: lowerCAmelCase : str = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''') _register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf''']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, '''jax''', aliases=[]) else: lowerCAmelCase : Any = ValueError('''JAX needs to be installed to be able to return JAX arrays.''') _register_unavailable_formatter(_jax_error, '''jax''', aliases=[]) def __lowerCAmelCase ( lowerCamelCase : Optional[str] ): '''simple docstring''' if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def __lowerCAmelCase ( lowerCamelCase : Optional[str] , **lowerCamelCase : Tuple ): '''simple docstring''' __lowerCAmelCase = get_format_type_from_alias(lowerCamelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**lowerCamelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f'''Return type should be None or selected 
in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
39
0
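As a sanity check on the QFT circuit above: its unitary is the normalized DFT matrix of size 2**n, and applying it to |0...0> yields the uniform superposition, which is why the 10000-shot histogram the sample returns is roughly flat over all 2**n outcomes. A small numpy sketch:

import numpy as np

n = 3
N = 2**n
# F[j, k] = exp(2*pi*i*j*k / N) / sqrt(N)
j, k = np.meshgrid(np.arange(N), np.arange(N), indexing="ij")
F = np.exp(2j * np.pi * j * k / N) / np.sqrt(N)

assert np.allclose(F @ F.conj().T, np.eye(N))  # unitarity

state0 = np.zeros(N)
state0[0] = 1.0                    # the |000> basis state
print(np.round(F @ state0, 3))     # every amplitude is 1/sqrt(8), about 0.354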
'''simple docstring''' import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : Any = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ): a : str = XLMProphetNetTokenizer a : Tuple = False a : Any = True def UpperCAmelCase_ ( self ) -> Union[str, Any]: super().setUp() # We have a SentencePiece fixture for testing __lowerCAmelCase = XLMProphetNetTokenizer(UpperCamelCase , keep_accents=UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase_ ( self ) -> Tuple: __lowerCAmelCase = "[PAD]" __lowerCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase ) def UpperCAmelCase_ ( self ) -> List[str]: __lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "[PAD]" ) self.assertEqual(vocab_keys[1] , "[CLS]" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(UpperCamelCase ) , 1012 ) def UpperCAmelCase_ ( self ) -> Any: self.assertEqual(self.get_tokenizer().vocab_size , 1012 ) def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = XLMProphetNetTokenizer(UpperCamelCase , keep_accents=UpperCamelCase ) __lowerCAmelCase = tokenizer.tokenize("This is a test" ) self.assertListEqual(UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __lowerCAmelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( UpperCamelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) __lowerCAmelCase = tokenizer.convert_tokens_to_ids(UpperCamelCase ) self.assertListEqual( UpperCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual( UpperCamelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "[UNK]", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "[UNK]", ".", ] , ) @cached_property def UpperCAmelCase_ ( self ) -> List[str]: return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" ) @slow def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = "Hello World!" 
__lowerCAmelCase = [3_5389, 6672, 49, 2] self.assertListEqual(UpperCamelCase , self.big_tokenizer.encode(UpperCamelCase ) ) @slow def UpperCAmelCase_ ( self ) -> List[Any]: # fmt: off __lowerCAmelCase = {"input_ids": [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
712
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __lowerCAmelCase ( lowerCamelCase : Any ): '''simple docstring''' __lowerCAmelCase = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2] __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: __lowerCAmelCase = [3, 3, 3, 3] __lowerCAmelCase = [5, 5, 5, 5] elif "fl4" in model_name: __lowerCAmelCase = [4, 4, 4, 4] __lowerCAmelCase = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: __lowerCAmelCase = [3, 3, 3, 3] if "lrf" in model_name: __lowerCAmelCase = [3, 3, 3, 3] else: __lowerCAmelCase = [2, 2, 2, 2] if "tiny" in model_name: __lowerCAmelCase = 96 elif "small" in model_name: __lowerCAmelCase = 96 elif "base" in model_name: __lowerCAmelCase = 1_28 elif "large" in model_name: __lowerCAmelCase = 1_92 elif "xlarge" in model_name: __lowerCAmelCase = 2_56 elif "huge" in model_name: __lowerCAmelCase = 3_52 # set label information __lowerCAmelCase = "huggingface/label-files" if "large" in model_name or "huge" in model_name: __lowerCAmelCase = "imagenet-22k-id2label.json" else: __lowerCAmelCase = "imagenet-1k-id2label.json" __lowerCAmelCase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="dataset" ) , "r" ) ) __lowerCAmelCase = {int(lowerCamelCase ): v for k, v in idalabel.items()} __lowerCAmelCase = {v: k for k, v in idalabel.items()} __lowerCAmelCase = FocalNetConfig( embed_dim=lowerCamelCase , depths=lowerCamelCase , focal_levels=lowerCamelCase , focal_windows=lowerCamelCase , use_conv_embed=lowerCamelCase , idalabel=lowerCamelCase , labelaid=lowerCamelCase , use_post_layernorm=lowerCamelCase , use_layerscale=lowerCamelCase , ) return config def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ): '''simple docstring''' if "patch_embed.proj" in name: __lowerCAmelCase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: __lowerCAmelCase = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: __lowerCAmelCase = "encoder." 
+ name if "encoder.layers" in name: __lowerCAmelCase = name.replace("encoder.layers" , "encoder.stages" ) if "downsample.proj" in name: __lowerCAmelCase = name.replace("downsample.proj" , "downsample.projection" ) if "blocks" in name: __lowerCAmelCase = name.replace("blocks" , "layers" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: __lowerCAmelCase = name.replace("modulation.f" , "modulation.projection_in" ) if "modulation.h.weight" in name or "modulation.h.bias" in name: __lowerCAmelCase = name.replace("modulation.h" , "modulation.projection_context" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: __lowerCAmelCase = name.replace("modulation.proj" , "modulation.projection_out" ) if name == "norm.weight": __lowerCAmelCase = "layernorm.weight" if name == "norm.bias": __lowerCAmelCase = "layernorm.bias" if "head" in name: __lowerCAmelCase = name.replace("head" , "classifier" ) else: __lowerCAmelCase = "focalnet." + name return name def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Union[str, Any]=False ): '''simple docstring''' __lowerCAmelCase = { "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth", "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth", "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth", "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth", "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth", "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth", "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth", "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth", "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth", "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth", } # fmt: on __lowerCAmelCase = model_name_to_url[model_name] print("Checkpoint URL: " , lowerCamelCase ) __lowerCAmelCase = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location="cpu" )["model"] # rename keys for key in state_dict.copy().keys(): __lowerCAmelCase = state_dict.pop(lowerCamelCase ) __lowerCAmelCase = val __lowerCAmelCase = get_focalnet_config(lowerCamelCase ) __lowerCAmelCase = FocalNetForImageClassification(lowerCamelCase ) model.eval() # load state dict model.load_state_dict(lowerCamelCase ) # verify conversion __lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowerCAmelCase = BitImageProcessor( do_resize=lowerCamelCase , size={"shortest_edge": 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase , crop_size=2_24 , do_normalize=lowerCamelCase , image_mean=lowerCamelCase , image_std=lowerCamelCase , ) __lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ) __lowerCAmelCase = processor(images=lowerCamelCase , return_tensors="pt" ) __lowerCAmelCase = transforms.Compose( [ transforms.Resize(2_56 ), transforms.CenterCrop(2_24 ), 
transforms.ToTensor(), transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) __lowerCAmelCase = image_transforms(lowerCamelCase ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , lowerCamelCase , atol=1e-4 ) __lowerCAmelCase = model(**lowerCamelCase ) __lowerCAmelCase = outputs.logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) print("First values of logits:" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": __lowerCAmelCase = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ) elif model_name == "focalnet-tiny-lrf": __lowerCAmelCase = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] ) elif model_name == "focalnet-small": __lowerCAmelCase = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] ) elif model_name == "focalnet-small-lrf": __lowerCAmelCase = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] ) elif model_name == "focalnet-base": __lowerCAmelCase = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] ) elif model_name == "focalnet-base-lrf": __lowerCAmelCase = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] ) assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase ) processor.save_pretrained(lowerCamelCase ) if push_to_hub: print(f'''Pushing model and processor of {model_name} to the hub...''' ) model.push_to_hub(f'''{model_name}''' ) processor.push_to_hub(f'''{model_name}''' ) if __name__ == "__main__": lowerCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''focalnet-tiny''', type=str, help='''Name of the FocalNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub.''', ) lowerCAmelCase : Optional[int] = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
39
0
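The FocalNet conversion script above follows the standard checkpoint-porting pattern: pop each key out of the source state_dict, rewrite it with ordered string replacements, and reinsert it under the new name before calling load_state_dict. A generic sketch of that pattern; the tensors are dummies and the rename table reuses a few of the script's real substitutions:

import torch

def rename_key(name: str) -> str:
    # Ordered string replacements; order matters when patterns overlap.
    for old, new in [
        ("patch_embed.proj", "embeddings.patch_embeddings.projection"),
        ("blocks", "layers"),
        ("head", "classifier"),
    ]:
        name = name.replace(old, new)
    return name

state_dict = {
    "patch_embed.proj.weight": torch.zeros(96, 3, 4, 4),
    "blocks.0.mlp.fc1.weight": torch.zeros(8, 8),
    "head.bias": torch.zeros(1000),
}
for key in list(state_dict):           # same pop-and-reinsert loop as the script
    state_dict[rename_key(key)] = state_dict.pop(key)
print(sorted(state_dict))
# ['classifier.bias', 'embeddings.patch_embeddings.projection.weight', 'layers.0.mlp.fc1.weight']
# ...then model.load_state_dict(state_dict) on the freshly built target model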
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class UpperCAmelCase__ ( UpperCamelCase__ ): def __init__( self ) -> List[str]: # test for the above condition self.test() def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = 0 __lowerCAmelCase = False while not completed: if counter == 1: self.reset() __lowerCAmelCase = self.advance() if not self.does_advance(UpperCamelCase ): raise Exception( "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.update(UpperCamelCase ) counter += 1 if counter > 1_0000: raise Exception("update() does not fulfill the constraint." ) if self.remaining() != 0: raise Exception("Custom Constraint is not defined correctly." ) @abstractmethod def UpperCAmelCase_ ( self ) -> Dict: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self ) -> int: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self ) -> int: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> str: raise NotImplementedError( F'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.'''
        )


class PhrasalConstraint(Constraint):
    def __init__(self, token_ids):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''')
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f'''`token_ids` has to be a list of positive integers, but is {token_ids}.''')

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}''')
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}''')

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint


class DisjunctiveTrie:
    def __init__(self, nested_token_ids, no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f''' {nested_token_ids}.'''
            )

        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count


class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''')
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''')
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.'''
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}''')
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}''')

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint


class ConstraintListState:
    def __init__(self, constraints):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints is fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''')

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #    inprogress to None. If there are no pending constraints either, then this full list of
                #    constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step toward any
            # constraint in our list?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never touch self.constraints objects
        # throughout this process, so they are still at their initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
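To make the update/reset flow above concrete, here is a minimal usage sketch (not part of the original file) with invented token ids:

# Minimal usage sketch for the constraint classes above; the token ids are made up.
phrase = PhrasalConstraint([5, 9, 3])          # force generating 5, 9, 3 contiguously
for token in [5, 9, 3]:
    stepped, completed, reset = phrase.update(token)
print(phrase.completed)                         # True: the phrase was generated in order

either = DisjunctiveConstraint([[1, 2, 3], [1, 4]])  # either token sequence satisfies it
print(either.advance())                         # [1]: the trie's next allowed tokens
either.update(1)
print(either.advance())                         # [2, 4]: both branches remain open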
713
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase : str = { '''vocab_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt''' ), '''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''', '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase : Optional[Any] = { '''squeezebert/squeezebert-uncased''': 5_1_2, '''squeezebert/squeezebert-mnli''': 5_1_2, '''squeezebert/squeezebert-mnli-headless''': 5_1_2, } lowerCAmelCase : Tuple = { '''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True}, } class UpperCAmelCase__ ( UpperCamelCase__ ): a : Dict = VOCAB_FILES_NAMES a : Any = PRETRAINED_VOCAB_FILES_MAP a : Dict = PRETRAINED_INIT_CONFIGURATION a : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a : Optional[Any] = SqueezeBertTokenizer def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase="[UNK]" , UpperCamelCase="[SEP]" , UpperCamelCase="[PAD]" , UpperCamelCase="[CLS]" , UpperCamelCase="[MASK]" , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ) -> List[Any]: super().__init__( UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , ) __lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , UpperCamelCase ) != do_lower_case or normalizer_state.get("strip_accents" , UpperCamelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCamelCase ) != tokenize_chinese_chars ): __lowerCAmelCase = getattr(UpperCamelCase , normalizer_state.pop("type" ) ) __lowerCAmelCase = do_lower_case __lowerCAmelCase = strip_accents __lowerCAmelCase = tokenize_chinese_chars __lowerCAmelCase = normalizer_class(**UpperCamelCase ) __lowerCAmelCase = do_lower_case def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=None ) -> str: __lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]: __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = 
[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> Tuple[str]: __lowerCAmelCase = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase ) return tuple(UpperCamelCase )
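Upstream, the two arguments of these methods are distinct sequences (`token_ids_0` and `token_ids_1`); the renaming above collapsed both into `token_ids_a`. A small illustration (hypothetical ids) of the segment-id layout that `create_token_type_ids_from_sequences` computes for a pair, `[CLS] A [SEP] B [SEP]`:

cls, sep = [101], [102]
seq_a, seq_b = [7, 8, 9], [4, 5]
token_type_ids = len(cls + seq_a + sep) * [0] + len(seq_b + sep) * [1]
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]: zeros for segment A, ones for segment B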
39
0
'''simple docstring''' import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ): a : Dict = KandinskyVaaPriorPipeline a : Dict = ["""prompt"""] a : Dict = ["""prompt""", """negative_prompt"""] a : Dict = [ """num_images_per_prompt""", """generator""", """num_inference_steps""", """latents""", """negative_prompt""", """guidance_scale""", """output_type""", """return_dict""", ] a : List[Any] = False @property def UpperCAmelCase_ ( self ) -> Dict: return 32 @property def UpperCAmelCase_ ( self ) -> Tuple: return 32 @property def UpperCAmelCase_ ( self ) -> List[Any]: return self.time_input_dim @property def UpperCAmelCase_ ( self ) -> str: return self.time_input_dim * 4 @property def UpperCAmelCase_ ( self ) -> str: return 100 @property def UpperCAmelCase_ ( self ) -> Tuple: __lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def UpperCAmelCase_ ( self ) -> List[str]: torch.manual_seed(0 ) __lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(UpperCamelCase ) @property def UpperCAmelCase_ ( self ) -> Dict: torch.manual_seed(0 ) __lowerCAmelCase = { "num_attention_heads": 2, "attention_head_dim": 12, "embedding_dim": self.text_embedder_hidden_size, "num_layers": 1, } __lowerCAmelCase = PriorTransformer(**UpperCamelCase ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __lowerCAmelCase = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def UpperCAmelCase_ ( self ) -> List[str]: torch.manual_seed(0 ) __lowerCAmelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) __lowerCAmelCase = CLIPVisionModelWithProjection(UpperCamelCase ) return model @property def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = CLIPImageProcessor( crop_size=224 , do_center_crop=UpperCamelCase , do_normalize=UpperCamelCase , do_resize=UpperCamelCase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , ) return image_processor def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase = self.dummy_prior __lowerCAmelCase = self.dummy_image_encoder __lowerCAmelCase = self.dummy_text_encoder __lowerCAmelCase = self.dummy_tokenizer __lowerCAmelCase = self.dummy_image_processor __lowerCAmelCase = UnCLIPScheduler( variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1000 , clip_sample=UpperCamelCase , clip_sample_range=10.0 , ) __lowerCAmelCase = { 
"prior": prior, "image_encoder": image_encoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, "image_processor": image_processor, } return components def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=0 ) -> Dict: if str(UpperCamelCase ).startswith("mps" ): __lowerCAmelCase = torch.manual_seed(UpperCamelCase ) else: __lowerCAmelCase = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) __lowerCAmelCase = { "prompt": "horse", "generator": generator, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def UpperCAmelCase_ ( self ) -> int: __lowerCAmelCase = "cpu" __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = self.pipeline_class(**UpperCamelCase ) __lowerCAmelCase = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) __lowerCAmelCase = pipe(**self.get_dummy_inputs(UpperCamelCase ) ) __lowerCAmelCase = output.image_embeds __lowerCAmelCase = pipe( **self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0] __lowerCAmelCase = image[0, -10:] __lowerCAmelCase = image_from_tuple[0, -10:] assert image.shape == (1, 32) __lowerCAmelCase = np.array( [-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = torch_device == "cpu" __lowerCAmelCase = True __lowerCAmelCase = False self._test_inference_batch_single_identical( test_max_difference=UpperCamelCase , relax_max_difference=UpperCamelCase , test_mean_pixel_difference=UpperCamelCase , ) @skip_mps def UpperCAmelCase_ ( self ) -> int: __lowerCAmelCase = torch_device == "cpu" __lowerCAmelCase = False self._test_attention_slicing_forward_pass( test_max_difference=UpperCamelCase , test_mean_pixel_difference=UpperCamelCase , )
714
'''simple docstring'''
from __future__ import annotations


def mean(nums: list):
    '''simple docstring'''
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
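A quick sanity check for the helper above (illustration only):

print(mean([2, 4, 6]))   # 4.0
print(mean([1.5, 2.5]))  # 2.0
# mean([]) raises ValueError("List is empty")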
39
0
'''simple docstring''' from packaging import version from .. import __version__ from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from .doc import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, copy_func, replace_return_docstrings, ) from .generic import ( ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType, add_model_info_to_auto_map, cached_property, can_return_loss, expand_dims, find_labels, flatten_dict, infer_framework, is_jax_tensor, is_numpy_array, is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_torch_device, is_torch_dtype, is_torch_tensor, reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj, transpose, working_or_temp_dir, ) from .hub import ( CLOUDFRONT_DISTRIB_PREFIX, DISABLE_TELEMETRY, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_file, default_cache_path, define_sagemaker_information, download_url, extract_commit_hash, get_cached_models, get_file_from_repo, get_full_repo_name, has_file, http_user_agent, is_offline_mode, is_remote_url, move_cache, send_example_telemetry, try_to_load_from_cache, ) from .import_utils import ( ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, USE_JAX, USE_TF, USE_TORCH, DummyObject, OptionalDependencyNotAvailable, _LazyModule, ccl_version, direct_transformers_import, get_torch_version, is_accelerate_available, is_apex_available, is_bitsandbytes_available, is_bsa_available, is_coloredlogs_available, is_cython_available, is_datasets_available, is_decord_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_jieba_available, is_jumanpp_available, is_kenlm_available, is_keras_nlp_available, is_librosa_available, is_natten_available, is_ninja_available, is_onnx_available, is_openai_available, is_optimum_available, is_pandas_available, is_peft_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytest_available, is_pytorch_quantization_available, is_rjieba_available, is_sacremoses_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_sudachi_available, is_tensorflow_probability_available, is_tensorflow_text_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_bfaa_cpu_available, is_torch_bfaa_gpu_available, is_torch_compile_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_neuroncore_available, is_torch_tensorrt_fx_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_torchdistx_available, is_torchdynamo_available, is_torchvision_available, is_training_run_on_sagemaker, is_vision_available, requires_backends, torch_only_method, ) lowerCAmelCase : Optional[Any] = '''pytorch_model.bin''' lowerCAmelCase : Optional[Any] = '''pytorch_model.bin.index.json''' lowerCAmelCase : 
List[str] = '''adapter_config.json''' lowerCAmelCase : str = '''adapter_model.bin''' lowerCAmelCase : Tuple = '''adapter_model.safetensors''' lowerCAmelCase : int = '''tf_model.h5''' lowerCAmelCase : Optional[Any] = '''tf_model.h5.index.json''' lowerCAmelCase : str = '''model.ckpt''' lowerCAmelCase : Union[str, Any] = '''flax_model.msgpack''' lowerCAmelCase : List[Any] = '''flax_model.msgpack.index.json''' lowerCAmelCase : List[str] = '''model.safetensors''' lowerCAmelCase : Tuple = '''model.safetensors.index.json''' lowerCAmelCase : List[Any] = '''config.json''' lowerCAmelCase : int = '''preprocessor_config.json''' lowerCAmelCase : Any = FEATURE_EXTRACTOR_NAME lowerCAmelCase : Optional[int] = '''generation_config.json''' lowerCAmelCase : int = '''modelcard.json''' lowerCAmelCase : int = '''▁''' lowerCAmelCase : Optional[int] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility lowerCAmelCase : Optional[int] = [ [[0, 1, 0, 1], [1, 0, 0, 1]] ] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too. lowerCAmelCase : Tuple = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] lowerCAmelCase : Any = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]] def __lowerCAmelCase ( lowerCamelCase : List[Any] ): '''simple docstring''' if version.parse(lowerCamelCase ) < version.parse(lowerCamelCase ): if "dev" in min_version: __lowerCAmelCase = ( "This example requires a source install from HuggingFace Transformers (see " "`https://huggingface.co/docs/transformers/installation#install-from-source`)," ) else: __lowerCAmelCase = f'''This example requires a minimum version of {min_version},''' error_message += f''' but the version found is {__version__}.\n''' raise ImportError( error_message + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other " "versions of HuggingFace Transformers." )
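The function at the end of this file is a minimum-version gate; the renaming collapsed its two operands into one name. Upstream it compares the installed `__version__` against a required `min_version`. A sketch of the comparison with placeholder version strings:

from packaging import version

installed, min_version = "4.20.0", "4.21.0.dev0"
if version.parse(installed) < version.parse(min_version):
    print("too old:", installed)  # the real check raises ImportError instead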
715
'''simple docstring'''
import re


def is_sri_lankan_phone_number(phone: str):
    '''simple docstring'''
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"
        r"7(0|1|2|4|5|6|7|8)"
        r"(-| |)"
        r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
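A few illustrative checks (made-up numbers) showing what the pattern accepts:

for number in ["0712345678", "+94712345678", "0094712345678", "075-1234567", "0731234567"]:
    print(number, is_sri_lankan_phone_number(number))
# The last one is rejected: the digit after the leading 7 must be one of 0, 1, 2, 4, 5, 6, 7, 8.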
39
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCAmelCase : Optional[Any] = { '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple = [ '''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTBigCodeForSequenceClassification''', '''GPTBigCodeForTokenClassification''', '''GPTBigCodeForCausalLM''', '''GPTBigCodeModel''', '''GPTBigCodePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
716
'''simple docstring''' import os import sys import unittest lowerCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowerCAmelCase : Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''') lowerCAmelCase : Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''') class UpperCAmelCase__ ( unittest.TestCase ): def UpperCAmelCase_ ( self ) -> str: __lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase ) __lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase ) __lowerCAmelCase = {"BertModelTest": "BertModelTester"} __lowerCAmelCase = { "BlipModelTest": "BlipModelTester", "BlipTextImageModelTest": "BlipTextImageModelsModelTester", "BlipTextModelTest": "BlipTextModelTester", "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester", "BlipVQAModelTest": "BlipVQAModelTester", "BlipVisionModelTest": "BlipVisionModelTester", } self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase ) __lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase ) __lowerCAmelCase = { "BertForMaskedLM": ["BertModelTest"], "BertForMultipleChoice": ["BertModelTest"], "BertForNextSentencePrediction": ["BertModelTest"], "BertForPreTraining": ["BertModelTest"], "BertForQuestionAnswering": ["BertModelTest"], "BertForSequenceClassification": ["BertModelTest"], "BertForTokenClassification": ["BertModelTest"], "BertLMHeadModel": ["BertModelTest"], "BertModel": ["BertModelTest"], } __lowerCAmelCase = { "BlipForConditionalGeneration": ["BlipTextImageModelTest"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"], "BlipForQuestionAnswering": ["BlipVQAModelTest"], "BlipModel": ["BlipModelTest"], "BlipTextModel": ["BlipTextModelTest"], "BlipVisionModel": ["BlipVisionModelTest"], } self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) def UpperCAmelCase_ ( self ) -> str: __lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase ) __lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase ) __lowerCAmelCase = { "BertForMaskedLM": ["BertModelTester"], "BertForMultipleChoice": ["BertModelTester"], "BertForNextSentencePrediction": ["BertModelTester"], "BertForPreTraining": ["BertModelTester"], "BertForQuestionAnswering": ["BertModelTester"], "BertForSequenceClassification": ["BertModelTester"], "BertForTokenClassification": ["BertModelTester"], "BertLMHeadModel": ["BertModelTester"], "BertModel": ["BertModelTester"], } __lowerCAmelCase = { "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"], "BlipForQuestionAnswering": ["BlipVQAModelTester"], "BlipModel": ["BlipModelTester"], "BlipTextModel": ["BlipTextModelTester"], "BlipVisionModel": ["BlipVisionModelTester"], } self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
39
0
'''simple docstring''' import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name def __lowerCAmelCase ( lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] ) -> Tuple: '''simple docstring''' warnings.warn( "The preprocess method is deprecated and will be removed in a future version. Please" " use VaeImageProcessor.preprocess instead" , lowerCamelCase , ) if isinstance(lowerCamelCase , torch.Tensor ): return image elif isinstance(lowerCamelCase , PIL.Image.Image ): __lowerCAmelCase = [image] if isinstance(image[0] , PIL.Image.Image ): __lowerCAmelCase , __lowerCAmelCase = image[0].size __lowerCAmelCase , __lowerCAmelCase = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 __lowerCAmelCase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image] __lowerCAmelCase = np.concatenate(lowerCamelCase , axis=0 ) __lowerCAmelCase = np.array(lowerCamelCase ).astype(np.floataa ) / 2_55.0 __lowerCAmelCase = image.transpose(0 , 3 , 1 , 2 ) __lowerCAmelCase = 2.0 * image - 1.0 __lowerCAmelCase = torch.from_numpy(lowerCamelCase ) elif isinstance(image[0] , torch.Tensor ): __lowerCAmelCase = torch.cat(lowerCamelCase , dim=0 ) return image def __lowerCAmelCase ( lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] ) -> Optional[Any]: '''simple docstring''' if isinstance(lowerCamelCase , torch.Tensor ): return mask elif isinstance(lowerCamelCase , PIL.Image.Image ): __lowerCAmelCase = [mask] if isinstance(mask[0] , PIL.Image.Image ): __lowerCAmelCase , __lowerCAmelCase = mask[0].size __lowerCAmelCase , __lowerCAmelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 __lowerCAmelCase = [np.array(m.convert("L" ).resize((w, h) , resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask] __lowerCAmelCase = np.concatenate(lowerCamelCase , axis=0 ) __lowerCAmelCase = mask.astype(np.floataa ) / 2_55.0 __lowerCAmelCase = 0 __lowerCAmelCase = 1 __lowerCAmelCase = torch.from_numpy(lowerCamelCase ) elif isinstance(mask[0] , torch.Tensor ): __lowerCAmelCase = torch.cat(lowerCamelCase , dim=0 ) return mask class UpperCAmelCase__ ( UpperCamelCase__ ): a : UNetaDModel a : RePaintScheduler def __init__( self , UpperCamelCase , UpperCamelCase ) -> List[str]: super().__init__() self.register_modules(unet=UpperCamelCase , scheduler=UpperCamelCase ) @torch.no_grad() def __call__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase = 250 , UpperCamelCase = 0.0 , UpperCamelCase = 10 , UpperCamelCase = 10 , UpperCamelCase = None , UpperCamelCase = "pil" , UpperCamelCase = True , ) -> Union[ImagePipelineOutput, Tuple]: __lowerCAmelCase = image __lowerCAmelCase = _preprocess_image(UpperCamelCase ) __lowerCAmelCase = original_image.to(device=self.device , dtype=self.unet.dtype ) __lowerCAmelCase = _preprocess_mask(UpperCamelCase ) __lowerCAmelCase = mask_image.to(device=self.device , dtype=self.unet.dtype ) __lowerCAmelCase = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCamelCase )}, but requested an effective 
batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) __lowerCAmelCase = original_image.shape __lowerCAmelCase = randn_tensor(UpperCamelCase , generator=UpperCamelCase , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCamelCase , UpperCamelCase , UpperCamelCase , self.device ) __lowerCAmelCase = eta __lowerCAmelCase = self.scheduler.timesteps[0] + 1 __lowerCAmelCase = generator[0] if isinstance(UpperCamelCase , UpperCamelCase ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual __lowerCAmelCase = self.unet(UpperCamelCase , UpperCamelCase ).sample # compute previous image: x_t -> x_t-1 __lowerCAmelCase = self.scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample else: # compute the reverse: x_t-1 -> x_t __lowerCAmelCase = self.scheduler.undo_step(UpperCamelCase , UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = t __lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 ) __lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __lowerCAmelCase = self.numpy_to_pil(UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase )
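A hedged usage sketch for the pipeline above, following the standard diffusers RePaint example; the checkpoint id and file names are placeholders, and the keyword names follow the upstream signature (the renaming hid them here):

import PIL.Image
from diffusers import RePaintPipeline, RePaintScheduler

scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)

original = PIL.Image.open("face.png").convert("RGB")  # image to restore
mask = PIL.Image.open("mask.png").convert("RGB")      # 1 = keep pixel, 0 = inpaint
result = pipe(image=original, mask_image=mask, num_inference_steps=250, jump_length=10, jump_n_sample=10)
result.images[0].save("inpainted.png")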
717
'''simple docstring''' from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class UpperCAmelCase__ ( UpperCamelCase__ ): a : torch.FloatTensor class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): @register_to_config def __init__( self , UpperCamelCase = 16 , UpperCamelCase = 88 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = 1 , UpperCamelCase = 0.0 , UpperCamelCase = 32 , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = "geglu" , UpperCamelCase = True , UpperCamelCase = True , ) -> List[str]: super().__init__() __lowerCAmelCase = num_attention_heads __lowerCAmelCase = attention_head_dim __lowerCAmelCase = num_attention_heads * attention_head_dim __lowerCAmelCase = in_channels __lowerCAmelCase = torch.nn.GroupNorm(num_groups=UpperCamelCase , num_channels=UpperCamelCase , eps=1E-6 , affine=UpperCamelCase ) __lowerCAmelCase = nn.Linear(UpperCamelCase , UpperCamelCase ) # 3. Define transformers blocks __lowerCAmelCase = nn.ModuleList( [ BasicTransformerBlock( UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , cross_attention_dim=UpperCamelCase , activation_fn=UpperCamelCase , attention_bias=UpperCamelCase , double_self_attention=UpperCamelCase , norm_elementwise_affine=UpperCamelCase , ) for d in range(UpperCamelCase ) ] ) __lowerCAmelCase = nn.Linear(UpperCamelCase , UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=1 , UpperCamelCase=None , UpperCamelCase = True , ) -> List[str]: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = hidden_states.shape __lowerCAmelCase = batch_frames // num_frames __lowerCAmelCase = hidden_states __lowerCAmelCase = hidden_states[None, :].reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) __lowerCAmelCase = self.norm(UpperCamelCase ) __lowerCAmelCase = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = self.proj_in(UpperCamelCase ) # 2. Blocks for block in self.transformer_blocks: __lowerCAmelCase = block( UpperCamelCase , encoder_hidden_states=UpperCamelCase , timestep=UpperCamelCase , cross_attention_kwargs=UpperCamelCase , class_labels=UpperCamelCase , ) # 3. Output __lowerCAmelCase = self.proj_out(UpperCamelCase ) __lowerCAmelCase = ( hidden_states[None, None, :] .reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) __lowerCAmelCase = hidden_states.reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=UpperCamelCase )
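The permute/reshape sequence in the forward pass above is easy to lose track of; this small sketch (synthetic shapes only) traces how a (batch*frames, channels, height, width) input is regrouped so attention runs over the frame axis per spatial location:

import torch

batch_size, num_frames, channels, height, width = 2, 4, 16, 8, 8
hidden_states = torch.randn(batch_size * num_frames, channels, height, width)

x = hidden_states[None, :].reshape(batch_size, num_frames, channels, height, width)
x = x.permute(0, 2, 1, 3, 4)  # (batch, channels, frames, height, width)
x = x.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channels)
print(x.shape)  # torch.Size([128, 4, 16]): sequence length is num_frames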
39
0
'''simple docstring''' import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate lowerCAmelCase : str = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow('''''', '''|''', '''|'''), datarow=DataRow('''''', '''|''', '''|'''), padding=1, with_header_hide=None, ) lowerCAmelCase : Optional[int] = [] lowerCAmelCase : str = [] lowerCAmelCase : str = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}} lowerCAmelCase : List[Any] = [ { '''type''': '''header''', '''text''': { '''type''': '''plain_text''', '''text''': f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results', '''emoji''': True, }, } ] lowerCAmelCase : Dict = 0 for log in Path().glob('''*.log'''): lowerCAmelCase : List[Any] = 0 with open(log, '''r''') as f: for line in f: lowerCAmelCase : List[str] = json.loads(line) if line.get('''nodeid''', '''''') != "": lowerCAmelCase : List[Any] = line['''nodeid'''] if line.get('''duration''', None) is not None: lowerCAmelCase : str = f'{line["duration"]:.4f}' if line.get('''outcome''', '''''') == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split('''_''')[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) lowerCAmelCase : Dict = [] log.unlink() lowerCAmelCase : Optional[int] = '''''' lowerCAmelCase : Optional[Any] = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += f"*{name[1:]}: {num_failed} failed test*\n" else: message += f"*{name[1:]}: {num_failed} failed tests*\n" lowerCAmelCase : Any = [] lowerCAmelCase : str = {} for test in failed_tests: lowerCAmelCase : Optional[int] = test[0].split('''::''') lowerCAmelCase : Optional[Any] = data[0].split('''/''')[-1] if data[0] not in filesafailed: lowerCAmelCase : List[str] = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) lowerCAmelCase : Optional[Any] = [test[0] for test in failed_table] lowerCAmelCase : str = list(set(files)) # Count number of instances in failed_tests lowerCAmelCase : Tuple = [] for file in individual_files: table.append([file, len(filesafailed[file])]) lowerCAmelCase : List[str] = tabulate( table, headers=['''Test Location''', '''Num Failed'''], tablefmt=hf_table_format, stralign='''right''', ) message += f"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3_0_0_0: lowerCAmelCase : List[Any] = '''Too many failed tests, please see the full report in the Action results.''' lowerCAmelCase : Tuple = len(err) + 1_0 lowerCAmelCase : List[Any] = message[: 3_0_0_0 - offset] + f'\n...\n```\n{err}' print(f'### {message}') else: lowerCAmelCase : List[Any] = '''No failed tests! 🤗''' print(f'## {message}') payload.append(no_error_payload) if os.environ.get('''TEST_TYPE''', '''''') != "": from slack_sdk import WebClient lowerCAmelCase : List[str] = WebClient(token=os.environ['''SLACK_API_TOKEN''']) if message != "No failed tests! 
🤗": lowerCAmelCase : int = { '''type''': '''section''', '''text''': { '''type''': '''mrkdwn''', '''text''': message, }, } payload.append(md_report) lowerCAmelCase : List[Any] = { '''type''': '''section''', '''text''': { '''type''': '''mrkdwn''', '''text''': '''*For more details:*''', }, '''accessory''': { '''type''': '''button''', '''text''': { '''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True, }, '''url''': f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } payload.append(action_button) lowerCAmelCase : str = { '''type''': '''context''', '''elements''': [ { '''type''': '''plain_text''', '''text''': f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}', } ], } payload.append(date_report) lowerCAmelCase : Optional[Any] = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload) lowerCAmelCase : Union[str, Any] = response.data['''ts'''] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name lowerCAmelCase : Optional[int] = '''''' for i, row in enumerate(test_failures): if row[0] != test_class: lowerCAmelCase : Union[str, Any] = row[0] else: lowerCAmelCase : Tuple = '''''' lowerCAmelCase : Tuple = { '''type''': '''section''', '''text''': { '''type''': '''mrkdwn''', '''text''': f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```', }, } client.chat_postMessage( channel='''#accelerate-ci-daily''', thread_ts=ts, blocks=[payload], )
718
'''simple docstring''' import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def __lowerCAmelCase ( lowerCamelCase : bytes , lowerCamelCase : int ): '''simple docstring''' __lowerCAmelCase = f'''{sampling_rate}''' __lowerCAmelCase = "1" __lowerCAmelCase = "f32le" __lowerCAmelCase = [ "ffmpeg", "-i", "pipe:0", "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-hide_banner", "-loglevel", "quiet", "pipe:1", ] try: with subprocess.Popen(lowerCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: __lowerCAmelCase = ffmpeg_process.communicate(lowerCamelCase ) except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error __lowerCAmelCase = output_stream[0] __lowerCAmelCase = np.frombuffer(lowerCamelCase , np.floataa ) if audio.shape[0] == 0: raise ValueError("Malformed soundfile" ) return audio def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : str = "f32le" , ): '''simple docstring''' __lowerCAmelCase = f'''{sampling_rate}''' __lowerCAmelCase = "1" if format_for_conversion == "s16le": __lowerCAmelCase = 2 elif format_for_conversion == "f32le": __lowerCAmelCase = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) __lowerCAmelCase = platform.system() if system == "Linux": __lowerCAmelCase = "alsa" __lowerCAmelCase = "default" elif system == "Darwin": __lowerCAmelCase = "avfoundation" __lowerCAmelCase = ":0" elif system == "Windows": __lowerCAmelCase = "dshow" __lowerCAmelCase = "default" __lowerCAmelCase = [ "ffmpeg", "-f", format_, "-i", input_, "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-fflags", "nobuffer", "-hide_banner", "-loglevel", "quiet", "pipe:1", ] __lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample __lowerCAmelCase = _ffmpeg_stream(lowerCamelCase , lowerCamelCase ) for item in iterator: yield item def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Union[Tuple[float, float], float]] = None , lowerCamelCase : str = "f32le" , ): '''simple docstring''' if stream_chunk_s is not None: __lowerCAmelCase = stream_chunk_s else: __lowerCAmelCase = chunk_length_s __lowerCAmelCase = ffmpeg_microphone(lowerCamelCase , lowerCamelCase , format_for_conversion=lowerCamelCase ) if format_for_conversion == "s16le": __lowerCAmelCase = np.intaa __lowerCAmelCase = 2 elif format_for_conversion == "f32le": __lowerCAmelCase = np.floataa __lowerCAmelCase = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) if stride_length_s is None: __lowerCAmelCase = chunk_length_s / 6 __lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowerCamelCase , (int, float) ): __lowerCAmelCase = [stride_length_s, stride_length_s] __lowerCAmelCase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample __lowerCAmelCase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample __lowerCAmelCase = datetime.datetime.now() __lowerCAmelCase = datetime.timedelta(seconds=lowerCamelCase ) for item in chunk_bytes_iter(lowerCamelCase , lowerCamelCase , stride=(stride_left, stride_right) , stream=lowerCamelCase ): # Put everything back in numpy scale __lowerCAmelCase = np.frombuffer(item["raw"] , dtype=lowerCamelCase ) __lowerCAmelCase = ( item["stride"][0] // size_of_sample, item["stride"][1] // size_of_sample, ) __lowerCAmelCase = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def __lowerCAmelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Tuple[int, int] , lowerCamelCase : bool = False ): '''simple docstring''' __lowerCAmelCase = B"" __lowerCAmelCase , __lowerCAmelCase = stride if stride_left + stride_right >= chunk_len: raise ValueError( f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) __lowerCAmelCase = 0 for raw in iterator: acc += raw if stream and len(lowerCamelCase ) < chunk_len: __lowerCAmelCase = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowerCamelCase ) >= chunk_len: # We are flushing the accumulator __lowerCAmelCase = (_stride_left, stride_right) __lowerCAmelCase = {"raw": acc[:chunk_len], "stride": stride} if stream: __lowerCAmelCase = False yield item __lowerCAmelCase = stride_left __lowerCAmelCase = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowerCamelCase ) > stride_left: __lowerCAmelCase = {"raw": acc, "stride": (_stride_left, 0)} if stream: __lowerCAmelCase = False yield item def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int ): '''simple docstring''' __lowerCAmelCase = 2**24 # 16Mo try: with subprocess.Popen(lowerCamelCase , stdout=subprocess.PIPE , bufsize=lowerCamelCase ) as ffmpeg_process: while True: __lowerCAmelCase = ffmpeg_process.stdout.read(lowerCamelCase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
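A synthetic-bytes sketch of the striding chunker defined above (its name, `chunk_bytes_iter`, survives in the live-microphone helper); the first two arguments are passed positionally because the renamed signature hides their parameter names:

def byte_source():
    for _ in range(3):
        yield b"0123456789"  # three 10-byte reads

for item in chunk_bytes_iter(byte_source(), 12, stride=(2, 2)):
    print(len(item["raw"]), item["stride"])
# Prints 12 (0, 2) / 12 (2, 2) / 12 (2, 2) / 6 (2, 0): chunks overlap by the stride so a
# consumer can discard boundary samples, and the leftover tail is flushed at the end.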
39
0
'''simple docstring''' import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def __lowerCAmelCase ( lowerCamelCase : Union[dict, list, tuple, torch.Tensor] ): '''simple docstring''' __lowerCAmelCase = [] if isinstance(lowerCamelCase , lowerCamelCase ): for v in tree.values(): shapes.extend(_fetch_dims(lowerCamelCase ) ) elif isinstance(lowerCamelCase , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(lowerCamelCase ) ) elif isinstance(lowerCamelCase , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError("Not supported" ) return shapes @torch.jit.ignore def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Tuple[int, ...] ): '''simple docstring''' __lowerCAmelCase = [] for d in reversed(lowerCamelCase ): idx.append(flat_idx % d ) __lowerCAmelCase = flat_idx // d return tuple(reversed(lowerCamelCase ) ) @torch.jit.ignore def __lowerCAmelCase ( lowerCamelCase : Sequence[int] , lowerCamelCase : Sequence[int] , lowerCamelCase : Sequence[int] , lowerCamelCase : Optional[Sequence[bool]] = None , lowerCamelCase : Optional[Sequence[bool]] = None , ): '''simple docstring''' def reduce_edge_list(lowerCamelCase : List[bool] ) -> None: __lowerCAmelCase = True for i in range(len(lowerCamelCase ) ): __lowerCAmelCase = -1 * (i + 1) l[reversed_idx] &= tally __lowerCAmelCase = l[reversed_idx] if start_edges is None: __lowerCAmelCase = [s == 0 for s in start] reduce_edge_list(lowerCamelCase ) if end_edges is None: __lowerCAmelCase = [e == (d - 1) for e, d in zip(lowerCamelCase , lowerCamelCase )] reduce_edge_list(lowerCamelCase ) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(lowerCamelCase ) == 0: return [()] elif len(lowerCamelCase ) == 1: return [(slice(start[0] , end[0] + 1 ),)] __lowerCAmelCase = [] __lowerCAmelCase = [] # Dimensions common to start and end can be selected directly for s, e in zip(lowerCamelCase , lowerCamelCase ): if s == e: path_list.append(slice(lowerCamelCase , s + 1 ) ) else: break __lowerCAmelCase = tuple(lowerCamelCase ) __lowerCAmelCase = len(lowerCamelCase ) # start == end, and we're done if divergence_idx == len(lowerCamelCase ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None __lowerCAmelCase = start[divergence_idx] return tuple( path + (slice(lowerCamelCase , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None __lowerCAmelCase = end[divergence_idx] return tuple( path + (slice(lowerCamelCase , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just 
start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) __lowerCAmelCase = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def __lowerCAmelCase ( lowerCamelCase : torch.Tensor , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int ): '''simple docstring''' __lowerCAmelCase = t.shape[:no_batch_dims] __lowerCAmelCase = list(_flat_idx_to_idx(lowerCamelCase , lowerCamelCase ) ) # _get_minimal_slice_set is inclusive __lowerCAmelCase = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase ) ) # Get an ordered list of slices to perform __lowerCAmelCase = _get_minimal_slice_set( lowerCamelCase , lowerCamelCase , lowerCamelCase , ) __lowerCAmelCase = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def __lowerCAmelCase ( lowerCamelCase : Callable , lowerCamelCase : Dict[str, Any] , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : bool = False , lowerCamelCase : Any = None , lowerCamelCase : bool = False , ): '''simple docstring''' if not (len(lowerCamelCase ) > 0): raise ValueError("Must provide at least one input" ) __lowerCAmelCase = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase )] __lowerCAmelCase = tuple([max(lowerCamelCase ) for s in zip(*lowerCamelCase )] ) def _prep_inputs(lowerCamelCase : torch.Tensor ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: __lowerCAmelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) __lowerCAmelCase = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: __lowerCAmelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t __lowerCAmelCase = tensor_tree_map(_prep_inputs , lowerCamelCase ) __lowerCAmelCase = None if _out is not None: __lowerCAmelCase = tensor_tree_map(lambda lowerCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) __lowerCAmelCase = 1 for d in orig_batch_dims: flat_batch_dim *= d __lowerCAmelCase = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(lowerCamelCase : torch.Tensor ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t __lowerCAmelCase = 0 __lowerCAmelCase = prepped_outputs for _ in range(lowerCamelCase ): # Chunk the input if not low_mem: __lowerCAmelCase = _select_chunk else: __lowerCAmelCase = partial( _chunk_slice , flat_start=lowerCamelCase , flat_end=min(lowerCamelCase , i + chunk_size ) , no_batch_dims=len(lowerCamelCase ) , ) __lowerCAmelCase = tensor_tree_map(lowerCamelCase , lowerCamelCase ) # Run the layer on the chunk __lowerCAmelCase = layer(**lowerCamelCase ) # Allocate space for the output if out is None: __lowerCAmelCase = tensor_tree_map(lambda lowerCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowerCamelCase ) # Put the chunk in its 
pre-allocated space if isinstance(lowerCamelCase , lowerCamelCase ): def assign(lowerCamelCase : dict , lowerCamelCase : dict ) -> None: for k, v in da.items(): if isinstance(lowerCamelCase , lowerCamelCase ): assign(lowerCamelCase , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: __lowerCAmelCase = da[k] assign(lowerCamelCase , lowerCamelCase ) elif isinstance(lowerCamelCase , lowerCamelCase ): for xa, xa in zip(lowerCamelCase , lowerCamelCase ): if _add_into_out: xa[i : i + chunk_size] += xa else: __lowerCAmelCase = xa elif isinstance(lowerCamelCase , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: __lowerCAmelCase = output_chunk else: raise ValueError("Not supported" ) i += chunk_size __lowerCAmelCase = tensor_tree_map(lambda lowerCamelCase : t.view(orig_batch_dims + t.shape[1:] ) , lowerCamelCase ) return out class UpperCAmelCase__ : def __init__( self , UpperCamelCase = 512 , ) -> Any: __lowerCAmelCase = max_chunk_size __lowerCAmelCase = None __lowerCAmelCase = None def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: logging.info("Tuning chunk size..." ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size __lowerCAmelCase = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] __lowerCAmelCase = [c for c in candidates if c > min_chunk_size] __lowerCAmelCase = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(UpperCamelCase ) -> bool: try: with torch.no_grad(): fn(*UpperCamelCase , chunk_size=UpperCamelCase ) return True except RuntimeError: return False __lowerCAmelCase = 0 __lowerCAmelCase = len(UpperCamelCase ) - 1 while i > min_viable_chunk_size_index: __lowerCAmelCase = test_chunk_size(candidates[i] ) if not viable: __lowerCAmelCase = (min_viable_chunk_size_index + i) // 2 else: __lowerCAmelCase = i __lowerCAmelCase = (i + len(UpperCamelCase ) - 1) // 2 return candidates[min_viable_chunk_size_index] def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> bool: __lowerCAmelCase = True for aa, aa in zip(UpperCamelCase , UpperCamelCase ): assert type(UpperCamelCase ) == type(UpperCamelCase ) if isinstance(UpperCamelCase , (list, tuple) ): consistent &= self._compare_arg_caches(UpperCamelCase , UpperCamelCase ) elif isinstance(UpperCamelCase , UpperCamelCase ): __lowerCAmelCase = [v for _, v in sorted(aa.items() , key=lambda UpperCamelCase : x[0] )] __lowerCAmelCase = [v for _, v in sorted(aa.items() , key=lambda UpperCamelCase : x[0] )] consistent &= self._compare_arg_caches(UpperCamelCase , UpperCamelCase ) else: consistent &= aa == aa return consistent def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> int: __lowerCAmelCase = True __lowerCAmelCase = tree_map(lambda UpperCamelCase : a.shape if isinstance(UpperCamelCase , torch.Tensor ) else a , UpperCamelCase , UpperCamelCase ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(UpperCamelCase ) __lowerCAmelCase = self._compare_arg_caches(self.cached_arg_data , UpperCamelCase ) else: # Otherwise, we can reuse the precomputed value __lowerCAmelCase = False if not consistent: __lowerCAmelCase = self._determine_favorable_chunk_size( UpperCamelCase , UpperCamelCase , UpperCamelCase , ) __lowerCAmelCase = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
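A hedged usage sketch of the chunking entry point defined above: the public name `chunk_layer` is an assumption (the renaming hid it, but it matches the upstream source this file mirrors), and arguments are passed positionally as (layer, inputs, chunk_size, no_batch_dims):

import torch

def layer(x: torch.Tensor) -> torch.Tensor:
    return x * 2.0

inputs = {"x": torch.randn(8, 16)}       # batch of 8, feature size 16
out = chunk_layer(layer, inputs, 3, 1)   # run the layer 3 rows at a time
print(torch.allclose(out, inputs["x"] * 2.0))  # True: identical result, lower peak memory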
719
'''simple docstring''' from argparse import ArgumentParser from . import BaseTransformersCLICommand def __lowerCAmelCase ( lowerCamelCase : List[str] ): '''simple docstring''' return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class UpperCAmelCase__ ( UpperCamelCase__ ): @staticmethod def UpperCAmelCase_ ( UpperCamelCase ) -> Tuple: __lowerCAmelCase = parser.add_parser("download" ) download_parser.add_argument( "--cache-dir" , type=UpperCamelCase , default=UpperCamelCase , help="Path to location to store the models" ) download_parser.add_argument( "--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" ) download_parser.add_argument( "--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , ) download_parser.add_argument("model" , type=UpperCamelCase , help="Name of the model to download" ) download_parser.set_defaults(func=UpperCamelCase ) def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]: __lowerCAmelCase = model __lowerCAmelCase = cache __lowerCAmelCase = force __lowerCAmelCase = trust_remote_code def UpperCAmelCase_ ( self ) -> Any: from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
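For reference, a hedged invocation sketch of the subcommand registered above (the `transformers-cli` entry-point name is assumed from the surrounding project; the flags come from the parser definitions):

    transformers-cli download bert-base-uncased --cache-dir /tmp/hf-cache --force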
39
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase : Dict = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple = ['''MBartTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Union[str, Any] = ['''MBartTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Dict = [ '''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MBartForCausalLM''', '''MBartForConditionalGeneration''', '''MBartForQuestionAnswering''', '''MBartForSequenceClassification''', '''MBartModel''', '''MBartPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : int = [ '''TFMBartForConditionalGeneration''', '''TFMBartModel''', '''TFMBartPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[int] = [ '''FlaxMBartForConditionalGeneration''', '''FlaxMBartForQuestionAnswering''', '''FlaxMBartForSequenceClassification''', '''FlaxMBartModel''', '''FlaxMBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys lowerCAmelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
720
'''simple docstring'''
def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorisation: if
    n = p1^a1 * ... * pk^ak, the divisor count is (a1 + 1) * ... * (ak + 1)."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:  # one prime factor larger than sqrt(n) remains
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Project Euler 12: the first triangular number with more than 500 divisors."""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
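As a sanity check of the divisor-count helper above: 28 = 2^2 * 7, so it should report (2 + 1) * (1 + 1) = 6 divisors, and the known answer to this problem, 76576500, has 576:

assert count_divisors(28) == 6          # divisors: 1, 2, 4, 7, 14, 28
assert count_divisors(76576500) == 576  # first triangular number with > 500 divisors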
39
0
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with a single Pascal's-triangle row in O(r) space."""
    c = [0 for _ in range(r + 1)]
    c[0] = 1  # nC0 = 1
    for i in range(1, n + 1):
        # Update the row from right to left so that each c[j - 1] read is
        # still the value from the previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
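A quick check of the single-row update above: C(10, 5) = 252, and the symmetry C(n, r) = C(n, n - r) holds:

assert binomial_coefficient(n=10, r=5) == 252
assert binomial_coefficient(n=10, r=3) == binomial_coefficient(n=10, r=7) == 120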
721
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[int] = { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json''' ), } class UpperCAmelCase__ ( UpperCamelCase__ ): a : Optional[Any] = """dpr""" def __init__( self , UpperCamelCase=3_0522 , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=12 , UpperCamelCase=3072 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=1E-12 , UpperCamelCase=0 , UpperCamelCase="absolute" , UpperCamelCase = 0 , **UpperCamelCase , ) -> Tuple: super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase ) __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = hidden_act __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = projection_dim __lowerCAmelCase = position_embedding_type
39
0
'''simple docstring''' import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict , lowerCamelCase : Tuple ): '''simple docstring''' if isinstance(lowerCamelCase , lowerCamelCase ): __lowerCAmelCase = np.full((len(lowerCamelCase ), sequence_length, 2) , lowerCamelCase ) else: __lowerCAmelCase = np.full((len(lowerCamelCase ), sequence_length) , lowerCamelCase ) for i, tensor in enumerate(lowerCamelCase ): if padding_side == "right": if isinstance(lowerCamelCase , lowerCamelCase ): __lowerCAmelCase = tensor[:sequence_length] else: __lowerCAmelCase = tensor[:sequence_length] else: if isinstance(lowerCamelCase , lowerCamelCase ): __lowerCAmelCase = tensor[:sequence_length] else: __lowerCAmelCase = tensor[:sequence_length] return out_tensor.tolist() def __lowerCAmelCase ( lowerCamelCase : int ): '''simple docstring''' __lowerCAmelCase = ord(lowerCamelCase ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26): return True __lowerCAmelCase = unicodedata.category(lowerCamelCase ) if cat.startswith("P" ): return True return False @dataclass class UpperCAmelCase__ ( UpperCamelCase__ ): a : PreTrainedTokenizerBase a : Union[bool, str, PaddingStrategy] = True a : Optional[int] = None a : Optional[int] = None a : int = -1_0_0 a : str = "pt" def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[str]: import torch __lowerCAmelCase = "label" if "label" in features[0].keys() else "labels" __lowerCAmelCase = [feature[label_name] for feature in features] if label_name in features[0].keys() else None __lowerCAmelCase = self.tokenizer.pad( UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , ) if labels is None: return batch __lowerCAmelCase = torch.tensor(batch["entity_ids"] ).shape[1] __lowerCAmelCase = self.tokenizer.padding_side if padding_side == "right": __lowerCAmelCase = [ list(UpperCamelCase ) + [self.label_pad_token_id] * (sequence_length - len(UpperCamelCase )) for label in labels ] else: __lowerCAmelCase = [ [self.label_pad_token_id] * (sequence_length - len(UpperCamelCase )) + list(UpperCamelCase ) for label in labels ] __lowerCAmelCase = [feature["ner_tags"] for feature in features] __lowerCAmelCase = padding_tensor(UpperCamelCase , -1 , UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = [feature["original_entity_spans"] for feature in features] __lowerCAmelCase = padding_tensor(UpperCamelCase , (-1, -1) , UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = {k: torch.tensor(UpperCamelCase , dtype=torch.intaa ) for k, v in batch.items()} return batch
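A minimal sketch of the right/left padding idea used by the collator above, assuming plain Python lists in place of its per-feature dictionaries; the function and argument names are illustrative:

import numpy as np

def pad_sequences(sequences, pad_value, padding_side, sequence_length):
    # Pre-fill a (batch, sequence_length) array with pad_value, then copy each
    # (truncated) sequence in on the requested side.
    out = np.full((len(sequences), sequence_length), pad_value)
    for i, seq in enumerate(sequences):
        seq = seq[:sequence_length]
        if padding_side == "right":
            out[i, : len(seq)] = seq
        else:
            out[i, sequence_length - len(seq) :] = seq
    return out.tolist()

print(pad_sequences([[1, 2], [3, 4, 5]], pad_value=-100, padding_side="right", sequence_length=4))
# [[1, 2, -100, -100], [3, 4, 5, -100]]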
700
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase : Union[str, Any] = { '''configuration_distilbert''': [ '''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DistilBertConfig''', '''DistilBertOnnxConfig''', ], '''tokenization_distilbert''': ['''DistilBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple = ['''DistilBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple = [ '''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DistilBertForMaskedLM''', '''DistilBertForMultipleChoice''', '''DistilBertForQuestionAnswering''', '''DistilBertForSequenceClassification''', '''DistilBertForTokenClassification''', '''DistilBertModel''', '''DistilBertPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[str] = [ '''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDistilBertForMaskedLM''', '''TFDistilBertForMultipleChoice''', '''TFDistilBertForQuestionAnswering''', '''TFDistilBertForSequenceClassification''', '''TFDistilBertForTokenClassification''', '''TFDistilBertMainLayer''', '''TFDistilBertModel''', '''TFDistilBertPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[int] = [ '''FlaxDistilBertForMaskedLM''', '''FlaxDistilBertForMultipleChoice''', '''FlaxDistilBertForQuestionAnswering''', '''FlaxDistilBertForSequenceClassification''', '''FlaxDistilBertForTokenClassification''', '''FlaxDistilBertModel''', '''FlaxDistilBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, 
FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
39
0
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid, 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Scaled SiLU, x * sigmoid(1.702 * x), a common GELU approximation."""
    return vector * sigmoid(1.702 * vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
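At x = 0 the sigmoid is exactly 0.5, so the scaled SiLU above is 0 there; a quick numeric check:

x = np.array([-1.0, 0.0, 1.0])
print(sigmoid(x))              # approx. [0.2689, 0.5, 0.7311]
print(sigmoid_linear_unit(x))  # approx. [-0.1542, 0.0, 0.8458]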
701
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] ): '''simple docstring''' assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __lowerCAmelCase ( lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Dict ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_3": "float64", "col_1": "string", "col_2": "int64"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Any ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_3": "float64", "col_1": "string", "col_2": "int64"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowerCAmelCase = {"col_2": "int64", "col_3": "float64", "col_1": "string"} __lowerCAmelCase = features.copy() __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = tmp_path / "cache" 
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , split=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] ): '''simple docstring''' if issubclass(lowerCamelCase , lowerCamelCase ): __lowerCAmelCase = jsonl_path elif issubclass(lowerCamelCase , lowerCamelCase ): __lowerCAmelCase = [jsonl_path] __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : str=("train",) ): '''simple docstring''' assert isinstance(lowerCamelCase , lowerCamelCase ) for split in splits: __lowerCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __lowerCAmelCase ( lowerCamelCase : Dict , lowerCamelCase : int , lowerCamelCase : List[str] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read() _check_json_datasetdict(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Dict ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_datasetdict(lowerCamelCase , lowerCamelCase ) 
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : int ): '''simple docstring''' if split: __lowerCAmelCase = {split: jsonl_path} else: __lowerCAmelCase = "train" __lowerCAmelCase = {"train": jsonl_path, "test": jsonl_path} __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_datasetdict(lowerCamelCase , lowerCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __lowerCAmelCase ( lowerCamelCase : Optional[Any] ): '''simple docstring''' return json.load(lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : Tuple ): '''simple docstring''' return [json.loads(lowerCamelCase ) for line in buffer] class UpperCAmelCase__ : @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase ).write() buffer.seek(0 ) __lowerCAmelCase = load_json_function(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) assert isinstance(exported_content[0] , UpperCamelCase ) assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase ).write() buffer.seek(0 ) __lowerCAmelCase = load_json(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , num_proc=2 ).write() buffer.seek(0 ) __lowerCAmelCase = load_json_function(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) assert isinstance(exported_content[0] , UpperCamelCase ) assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> 
Union[str, Any]: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase , num_proc=2 ).write() buffer.seek(0 ) __lowerCAmelCase = load_json(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase ) == 10 def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any: with pytest.raises(UpperCamelCase ): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , num_proc=0 ) @pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple: __lowerCAmelCase = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}''' __lowerCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(UpperCamelCase , UpperCamelCase , compression=UpperCamelCase ).write() with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f: __lowerCAmelCase = f.read() with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f: __lowerCAmelCase = f.read() assert exported_content == original_content
39
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ): a : Union[str, Any] = KandinskyVaaControlnetImgaImgPipeline a : int = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""] a : int = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""] a : Dict = [ """generator""", """height""", """width""", """strength""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] a : Optional[int] = False @property def UpperCAmelCase_ ( self ) -> Optional[Any]: return 32 @property def UpperCAmelCase_ ( self ) -> Optional[Any]: return 32 @property def UpperCAmelCase_ ( self ) -> Tuple: return self.time_input_dim @property def UpperCAmelCase_ ( self ) -> str: return self.time_input_dim * 4 @property def UpperCAmelCase_ ( self ) -> List[str]: return 100 @property def UpperCAmelCase_ ( self ) -> str: torch.manual_seed(0 ) __lowerCAmelCase = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } __lowerCAmelCase = UNetaDConditionModel(**UpperCamelCase ) return model @property def UpperCAmelCase_ ( self ) -> Dict: return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def UpperCAmelCase_ ( self ) -> Any: torch.manual_seed(0 ) __lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = self.dummy_unet __lowerCAmelCase = self.dummy_movq __lowerCAmelCase = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_00_85, "beta_end": 0.0_12, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } __lowerCAmelCase = DDIMScheduler(**UpperCamelCase ) __lowerCAmelCase = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=0 ) -> List[str]: __lowerCAmelCase = floats_tensor((1, 
self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) __lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( UpperCamelCase ) # create init_image __lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) __lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCAmelCase = Image.fromarray(np.uinta(UpperCamelCase ) ).convert("RGB" ).resize((256, 256) ) # create hint __lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) if str(UpperCamelCase ).startswith("mps" ): __lowerCAmelCase = torch.manual_seed(UpperCamelCase ) else: __lowerCAmelCase = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) __lowerCAmelCase = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = "cpu" __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = self.pipeline_class(**UpperCamelCase ) __lowerCAmelCase = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) __lowerCAmelCase = pipe(**self.get_dummy_inputs(UpperCamelCase ) ) __lowerCAmelCase = output.images __lowerCAmelCase = pipe( **self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0] __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = np.array( [0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class UpperCAmelCase__ ( unittest.TestCase ): def UpperCAmelCase_ ( self ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" ) __lowerCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) __lowerCAmelCase = init_image.resize((512, 512) ) __lowerCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png" ) __lowerCAmelCase = torch.from_numpy(np.array(UpperCamelCase ) ).float() / 255.0 __lowerCAmelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) __lowerCAmelCase = "A robot, 4k photo" __lowerCAmelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(UpperCamelCase ) __lowerCAmelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa ) __lowerCAmelCase = pipeline.to(UpperCamelCase ) 
pipeline.set_progress_bar_config(disable=UpperCamelCase ) __lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowerCAmelCase , __lowerCAmelCase = pipe_prior( UpperCamelCase , image=UpperCamelCase , strength=0.85 , generator=UpperCamelCase , negative_prompt="" , ).to_tuple() __lowerCAmelCase = pipeline( image=UpperCamelCase , image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , hint=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , ) __lowerCAmelCase = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
702
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) lowerCAmelCase : Optional[Any] = { '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''], '''processing_trocr''': ['''TrOCRProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[str] = [ '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TrOCRForCausalLM''', '''TrOCRPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
39
0
'''simple docstring''' import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder lowerCAmelCase : Tuple = '''__DUMMY_TRANSFORMERS_USER__''' lowerCAmelCase : Optional[int] = '''Dummy User''' lowerCAmelCase : Dict = '''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''' lowerCAmelCase : Tuple = '''https://hub-ci.huggingface.co''' lowerCAmelCase : Union[str, Any] = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}''' lowerCAmelCase : List[Any] = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}''' lowerCAmelCase : List[Any] = Path('''~/.huggingface/hub_ci_token''').expanduser() @pytest.fixture def __lowerCAmelCase ( lowerCamelCase : str ): '''simple docstring''' monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , lowerCamelCase ) @pytest.fixture def __lowerCAmelCase ( lowerCamelCase : int ): '''simple docstring''' monkeypatch.setattr("datasets.config.HF_ENDPOINT" , lowerCamelCase ) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , lowerCamelCase ) @pytest.fixture def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ): '''simple docstring''' monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , lowerCamelCase ) @pytest.fixture def __lowerCAmelCase ( lowerCamelCase : List[str] , lowerCamelCase : Optional[Any] ): '''simple docstring''' HfFolder.save_token(lowerCamelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="session" ) def __lowerCAmelCase ( ): '''simple docstring''' return HfApi(endpoint=lowerCamelCase ) @pytest.fixture(scope="session" ) def __lowerCAmelCase ( lowerCamelCase : HfApi ): '''simple docstring''' __lowerCAmelCase = HfFolder.get_token() HfFolder.save_token(lowerCamelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(lowerCamelCase ) @pytest.fixture def __lowerCAmelCase ( lowerCamelCase : str ): '''simple docstring''' def _cleanup_repo(lowerCamelCase : Optional[Any] ): hf_api.delete_repo(lowerCamelCase , token=lowerCamelCase , repo_type="dataset" ) return _cleanup_repo @pytest.fixture def __lowerCAmelCase ( lowerCamelCase : List[str] ): '''simple docstring''' @contextmanager def _temporary_repo(lowerCamelCase : Any ): try: yield repo_id finally: cleanup_repo(lowerCamelCase ) return _temporary_repo @pytest.fixture(scope="session" ) def __lowerCAmelCase ( lowerCamelCase : HfApi , lowerCamelCase : str , lowerCamelCase : Dict ): '''simple docstring''' __lowerCAmelCase = f'''repo_txt_data-{int(time.time() * 10e3 )}''' __lowerCAmelCase = f'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(lowerCamelCase , token=lowerCamelCase , repo_type="dataset" , private=lowerCamelCase ) hf_api.upload_file( token=lowerCamelCase , path_or_fileobj=str(lowerCamelCase ) , path_in_repo="data/text_data.txt" , repo_id=lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(lowerCamelCase , token=lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCAmelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] ): '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session" ) def __lowerCAmelCase ( lowerCamelCase : HfApi , lowerCamelCase : str , lowerCamelCase : List[Any] ): '''simple docstring''' __lowerCAmelCase = f'''repo_zipped_txt_data-{int(time.time() * 10e3 )}''' 
__lowerCAmelCase = f'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(lowerCamelCase , token=lowerCamelCase , repo_type="dataset" , private=lowerCamelCase ) hf_api.upload_file( token=lowerCamelCase , path_or_fileobj=str(lowerCamelCase ) , path_in_repo="data.zip" , repo_id=lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(lowerCamelCase , token=lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : Any ): '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session" ) def __lowerCAmelCase ( lowerCamelCase : HfApi , lowerCamelCase : Any , lowerCamelCase : Any ): '''simple docstring''' __lowerCAmelCase = f'''repo_zipped_img_data-{int(time.time() * 10e3 )}''' __lowerCAmelCase = f'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(lowerCamelCase , token=lowerCamelCase , repo_type="dataset" , private=lowerCamelCase ) hf_api.upload_file( token=lowerCamelCase , path_or_fileobj=str(lowerCamelCase ) , path_in_repo="data.zip" , repo_id=lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(lowerCamelCase , token=lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple ): '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
703
'''simple docstring''' import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class UpperCAmelCase__ ( UpperCamelCase__ ): a : List[str] = (CMStochasticIterativeScheduler,) a : str = 1_0 def UpperCAmelCase_ ( self , **UpperCamelCase ) -> str: __lowerCAmelCase = { "num_train_timesteps": 201, "sigma_min": 0.0_02, "sigma_max": 80.0, } config.update(**UpperCamelCase ) return config def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase = 10 __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = self.scheduler_classes[0](**UpperCamelCase ) scheduler.set_timesteps(UpperCamelCase ) __lowerCAmelCase = scheduler.timesteps[0] __lowerCAmelCase = scheduler.timesteps[1] __lowerCAmelCase = self.dummy_sample __lowerCAmelCase = 0.1 * sample __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCAmelCase_ ( self ) -> Any: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=UpperCamelCase ) def UpperCAmelCase_ ( self ) -> List[str]: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = 1 scheduler.set_timesteps(UpperCamelCase ) __lowerCAmelCase = scheduler.timesteps __lowerCAmelCase = torch.manual_seed(0 ) __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(UpperCamelCase ): # 1. scale model input __lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase ) # 2. predict noise residual __lowerCAmelCase = model(UpperCamelCase , UpperCamelCase ) # 3. predict previous sample x_t-1 __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample __lowerCAmelCase = pred_prev_sample __lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) ) __lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2 assert abs(result_mean.item() - 0.25_10 ) < 1E-3 def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = [106, 0] scheduler.set_timesteps(timesteps=UpperCamelCase ) __lowerCAmelCase = scheduler.timesteps __lowerCAmelCase = torch.manual_seed(0 ) __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input __lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase ) # 2. predict noise residual __lowerCAmelCase = model(UpperCamelCase , UpperCamelCase ) # 3. 
predict previous sample x_t-1 __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample __lowerCAmelCase = pred_prev_sample __lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) ) __lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2 assert abs(result_mean.item() - 0.45_27 ) < 1E-3 def UpperCAmelCase_ ( self ) -> Any: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = [39, 30, 12, 15, 0] with self.assertRaises(UpperCamelCase , msg="`timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = [39, 30, 12, 1, 0] __lowerCAmelCase = len(UpperCamelCase ) with self.assertRaises(UpperCamelCase , msg="Can only pass one of `num_inference_steps` or `timesteps`." ): scheduler.set_timesteps(num_inference_steps=UpperCamelCase , timesteps=UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Tuple: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCamelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=UpperCamelCase )
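The pattern these tests exercise (scale the model input, predict the noise residual, step the scheduler) is the generic diffusers sampling loop; a minimal sketch, assuming `model` returns a plain tensor, as the dummy model here does, and `scheduler` is any compatible scheduler:

import torch

@torch.no_grad()
def sample(model, scheduler, sample, num_inference_steps=10, generator=None):
    scheduler.set_timesteps(num_inference_steps)
    sample = sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)  # 1. scale model input
        noise_pred = model(model_input, t)                    # 2. predict noise residual
        sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample  # 3. step
    return sample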
39
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase : Optional[int] = logging.get_logger(__name__) lowerCAmelCase : List[Any] = { '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''', } class UpperCAmelCase__ ( UpperCamelCase__ ): a : Union[str, Any] = """data2vec-text""" def __init__( self , UpperCamelCase=3_0522 , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=12 , UpperCamelCase=3072 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=1E-12 , UpperCamelCase=1 , UpperCamelCase=0 , UpperCamelCase=2 , UpperCamelCase="absolute" , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ) -> Dict: super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase ) __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = hidden_act __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = position_embedding_type __lowerCAmelCase = use_cache __lowerCAmelCase = classifier_dropout class UpperCAmelCase__ ( UpperCamelCase__ ): @property def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __lowerCAmelCase = {0: "batch", 1: "choice", 2: "sequence"} else: __lowerCAmelCase = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
704
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def world_covidaa_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the headline COVID-19 counters and panel statistics from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covidaa_stats().items():
        print(f"{key}\n{value}\n")
39
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : Dict = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Any = [ '''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ViTMSNModel''', '''ViTMSNForImageClassification''', '''ViTMSNPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys lowerCAmelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
705
'''simple docstring''' from __future__ import annotations import math def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ): '''simple docstring''' if len(lowerCamelCase ) != 2 or len(a[0] ) != 2 or len(lowerCamelCase ) != 2 or len(b[0] ) != 2: raise Exception("Matrices are not 2x2" ) __lowerCAmelCase = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ): '''simple docstring''' return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(lowerCamelCase ) ) ] def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ): '''simple docstring''' return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(lowerCamelCase ) ) ] def __lowerCAmelCase ( lowerCamelCase : list ): '''simple docstring''' if len(lowerCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception("Odd matrices are not supported!" ) __lowerCAmelCase = len(lowerCamelCase ) __lowerCAmelCase = matrix_length // 2 __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase , lowerCamelCase )] for i in range(lowerCamelCase )] __lowerCAmelCase = [ [a[i][j] for j in range(lowerCamelCase , lowerCamelCase )] for i in range(lowerCamelCase , lowerCamelCase ) ] __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase )] for i in range(lowerCamelCase )] __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase )] for i in range(lowerCamelCase , lowerCamelCase )] return top_left, top_right, bot_left, bot_right def __lowerCAmelCase ( lowerCamelCase : list ): '''simple docstring''' return len(lowerCamelCase ), len(matrix[0] ) def __lowerCAmelCase ( lowerCamelCase : list ): '''simple docstring''' print("\n".join(str(lowerCamelCase ) for line in matrix ) ) def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ): '''simple docstring''' if matrix_dimensions(lowerCamelCase ) == (2, 2): return default_matrix_multiplication(lowerCamelCase , lowerCamelCase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase ) __lowerCAmelCase = actual_strassen(lowerCamelCase , matrix_subtraction(lowerCamelCase , lowerCamelCase ) ) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) __lowerCAmelCase = actual_strassen(lowerCamelCase , matrix_subtraction(lowerCamelCase , lowerCamelCase ) ) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) ) __lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) ) __lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) ) __lowerCAmelCase = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) , lowerCamelCase ) __lowerCAmelCase = matrix_addition(lowerCamelCase , lowerCamelCase ) __lowerCAmelCase = matrix_addition(lowerCamelCase , lowerCamelCase ) __lowerCAmelCase = 
matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) , lowerCamelCase ) # construct the new matrix from our 4 quadrants __lowerCAmelCase = [] for i in range(len(lowerCamelCase ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(lowerCamelCase ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ): '''simple docstring''' if matrix_dimensions(lowerCamelCase )[1] != matrix_dimensions(lowerCamelCase )[0]: __lowerCAmelCase = ( "Unable to multiply these matrices, please check the dimensions.\n" f'''Matrix A: {matrixa}\n''' f'''Matrix B: {matrixa}''' ) raise Exception(lowerCamelCase ) __lowerCAmelCase = matrix_dimensions(lowerCamelCase ) __lowerCAmelCase = matrix_dimensions(lowerCamelCase ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] __lowerCAmelCase = max(*lowerCamelCase , *lowerCamelCase ) __lowerCAmelCase = int(math.pow(2 , math.ceil(math.loga(lowerCamelCase ) ) ) ) __lowerCAmelCase = matrixa __lowerCAmelCase = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , lowerCamelCase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , lowerCamelCase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , lowerCamelCase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) __lowerCAmelCase = actual_strassen(lowerCamelCase , lowerCamelCase ) # Removing the additional zeros for i in range(0 , lowerCamelCase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , lowerCamelCase ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": lowerCAmelCase : Tuple = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] lowerCAmelCase : Any = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]] print(strassen(matrixa, matrixa))
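Strassen's seven products written out directly for the 2x2 base case; this is the recursion the padded routine above applies block-wise, and a quick way to sanity-check it:

def strassen_2x2(a, b):
    # Seven multiplications instead of the naive eight.
    p1 = a[0][0] * (b[0][1] - b[1][1])
    p2 = (a[0][0] + a[0][1]) * b[1][1]
    p3 = (a[1][0] + a[1][1]) * b[0][0]
    p4 = a[1][1] * (b[1][0] - b[0][0])
    p5 = (a[0][0] + a[1][1]) * (b[0][0] + b[1][1])
    p6 = (a[0][1] - a[1][1]) * (b[1][0] + b[1][1])
    p7 = (a[0][0] - a[1][0]) * (b[0][0] + b[0][1])
    return [
        [p5 + p4 - p2 + p6, p1 + p2],
        [p3 + p4, p1 + p5 - p3 - p7],
    ]

print(strassen_2x2([[1, 2], [3, 4]], [[5, 6], [7, 8]]))  # [[19, 22], [43, 50]]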
39
0
'''simple docstring''' import os import re import shutil import sys import tempfile import unittest import black lowerCAmelCase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. lowerCAmelCase : Optional[int] = ''' \""" Output class for the scheduler\'s step function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample (x_{0}) based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. \""" prev_sample: torch.FloatTensor pred_original_sample: Optional[torch.FloatTensor] = None ''' class UpperCAmelCase__ ( unittest.TestCase ): def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) ) __lowerCAmelCase = self.diffusers_dir shutil.copy( os.path.join(UpperCamelCase , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , ) def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = "src/diffusers" shutil.rmtree(self.diffusers_dir ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None ) -> Optional[Any]: __lowerCAmelCase = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code if overwrite_result is not None: __lowerCAmelCase = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result __lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) __lowerCAmelCase = black.format_str(UpperCamelCase , mode=UpperCamelCase ) __lowerCAmelCase = os.path.join(self.diffusers_dir , "new_code.py" ) with open(UpperCamelCase , "w" , newline="\n" ) as f: f.write(UpperCamelCase ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(UpperCamelCase ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=UpperCamelCase ) with open(UpperCamelCase , "r" ) as f: self.assertTrue(f.read() , UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" ) self.assertEqual(UpperCamelCase , UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[Any]: # Base copy consistency self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , ) # With no empty line at the end self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , UpperCamelCase , ) # Copy consistency with rename self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , UpperCamelCase ) , ) # Copy consistency with a really long name __lowerCAmelCase = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason" self.check_copy_consistency( F'''# Copied 
from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("DDPM" , UpperCamelCase , UpperCamelCase ) , ) # Copy consistency with overwrite self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , UpperCamelCase , overwrite_result=re.sub("DDPM" , "Test" , UpperCamelCase ) , )
706
'''simple docstring''' import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput lowerCAmelCase : Optional[Any] = '''scheduler_config.json''' class UpperCAmelCase__ ( UpperCamelCase__ ): a : str = 1 a : Optional[int] = 2 a : int = 3 a : Union[str, Any] = 4 a : int = 5 a : Optional[int] = 6 a : str = 7 a : List[Any] = 8 a : List[str] = 9 a : List[str] = 1_0 a : int = 1_1 a : Any = 1_2 a : Any = 1_3 a : Tuple = 1_4 @dataclass class UpperCAmelCase__ ( UpperCamelCase__ ): a : torch.FloatTensor class UpperCAmelCase__ : a : Tuple = SCHEDULER_CONFIG_NAME a : Union[str, Any] = [] a : str = True @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase=False , **UpperCamelCase , ) -> int: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = cls.load_config( pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , ) return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False , **UpperCamelCase ) -> Dict: self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase ) @property def UpperCAmelCase_ ( self ) -> str: return self._get_compatibles() @classmethod def UpperCAmelCase_ ( cls ) -> Tuple: __lowerCAmelCase = list(set([cls.__name__] + cls._compatibles ) ) __lowerCAmelCase = importlib.import_module(__name__.split("." )[0] ) __lowerCAmelCase = [ getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase ) ] return compatible_classes
39
0
import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCAmelCase__ : def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=30 , UpperCamelCase=2 , UpperCamelCase=3 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=10 , UpperCamelCase=0.02 , UpperCamelCase=None , UpperCamelCase=2 , ) -> List[str]: __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = image_size __lowerCAmelCase = patch_size __lowerCAmelCase = num_channels __lowerCAmelCase = is_training __lowerCAmelCase = use_labels __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = type_sequence_label_size __lowerCAmelCase = initializer_range __lowerCAmelCase = scope __lowerCAmelCase = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __lowerCAmelCase = (image_size // patch_size) ** 2 __lowerCAmelCase = num_patches + 1 def UpperCAmelCase_ ( self ) -> Any: __lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase = None if self.use_labels: __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase = self.get_config() return config, pixel_values, labels def UpperCAmelCase_ ( self ) -> List[str]: return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]: __lowerCAmelCase = ViTModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __lowerCAmelCase = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]: __lowerCAmelCase = ViTForMaskedImageModeling(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __lowerCAmelCase = model(UpperCamelCase ) self.parent.assertEqual( 
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __lowerCAmelCase = 1 __lowerCAmelCase = ViTForMaskedImageModeling(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCAmelCase = model(UpperCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: __lowerCAmelCase = self.type_sequence_label_size __lowerCAmelCase = ViTForImageClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __lowerCAmelCase = model(UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowerCAmelCase = 1 __lowerCAmelCase = ViTForImageClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCAmelCase = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase_ ( self ) -> str: __lowerCAmelCase = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = config_and_inputs __lowerCAmelCase = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): a : int = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) a : str = ( {"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification} if is_torch_available() else {} ) a : Optional[int] = True a : Optional[Any] = False a : List[Any] = False a : str = False def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = ViTModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 ) def UpperCAmelCase_ ( self ) -> Optional[int]: self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def UpperCAmelCase_ ( self ) -> str: pass def UpperCAmelCase_ ( self ) -> Any: __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) ) def UpperCAmelCase_ ( self ) -> Tuple: __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(UpperCamelCase ) __lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase = [*signature.parameters.keys()] __lowerCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Tuple: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase ) def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase ) @slow def UpperCAmelCase_ ( self ) -> str: for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase = ViTModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class UpperCAmelCase__ ( unittest.TestCase ): @cached_property def UpperCAmelCase_ ( self ) -> Any: return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None @slow def UpperCAmelCase_ ( self ) -> List[str]: __lowerCAmelCase = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(UpperCamelCase ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase ) # forward pass with torch.no_grad(): __lowerCAmelCase = model(**UpperCamelCase ) # verify the logits __lowerCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) __lowerCAmelCase = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1E-4 ) ) @slow def UpperCAmelCase_ ( self ) -> Any: # ViT models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. __lowerCAmelCase = ViTModel.from_pretrained("facebook/dino-vits8" ).to(UpperCamelCase ) __lowerCAmelCase = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 ) __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=UpperCamelCase , return_tensors="pt" ) __lowerCAmelCase = inputs.pixel_values.to(UpperCamelCase ) # forward pass with torch.no_grad(): __lowerCAmelCase = model(UpperCamelCase , interpolate_pos_encoding=UpperCamelCase ) # verify the logits __lowerCAmelCase = torch.Size((1, 3601, 384) ) self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase ) __lowerCAmelCase = torch.tensor( [[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def UpperCAmelCase_ ( self ) -> List[str]: __lowerCAmelCase = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=UpperCamelCase , return_tensors="pt" ) __lowerCAmelCase = inputs.pixel_values.to(UpperCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): __lowerCAmelCase = model(UpperCamelCase )
707
'''simple docstring''' import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger lowerCAmelCase : List[Any] = get_logger(__name__) class UpperCAmelCase__ : def __init__( self , UpperCamelCase = None ) -> Union[str, Any]: __lowerCAmelCase = ( os.path.join(UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) __lowerCAmelCase = Extractor def UpperCAmelCase_ ( self , UpperCamelCase ) -> str: from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path __lowerCAmelCase = os.path.abspath(UpperCamelCase ) return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase ) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> bool: return force_extract or ( not os.path.isfile(UpperCamelCase ) and not (os.path.isdir(UpperCamelCase ) and os.listdir(UpperCamelCase )) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False ) -> str: __lowerCAmelCase = self.extractor.infer_extractor_format(UpperCamelCase ) if not extractor_format: return input_path __lowerCAmelCase = self._get_output_path(UpperCamelCase ) if self._do_extract(UpperCamelCase , UpperCamelCase ): self.extractor.extract(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return output_path class UpperCAmelCase__ ( UpperCamelCase__ ): @classmethod @abstractmethod def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool: ... @staticmethod @abstractmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: ...
class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): a : List[bytes] = [] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> List[Any]: with open(UpperCamelCase , "rb" ) as f: return f.read(UpperCamelCase ) @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool: if not magic_number: __lowerCAmelCase = max(len(UpperCamelCase ) for cls_magic_number in cls.magic_numbers ) try: __lowerCAmelCase = cls.read_magic_number(UpperCamelCase , UpperCamelCase ) except OSError: return False return any(magic_number.startswith(UpperCamelCase ) for cls_magic_number in cls.magic_numbers ) class UpperCAmelCase__ ( UpperCamelCase__ ): @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool: return tarfile.is_tarfile(UpperCamelCase ) @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict: def resolved(UpperCamelCase ) -> str: return os.path.realpath(os.path.abspath(UpperCamelCase ) ) def badpath(UpperCamelCase , UpperCamelCase ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(UpperCamelCase , UpperCamelCase ) ).startswith(UpperCamelCase ) def badlink(UpperCamelCase , UpperCamelCase ) -> bool: # Links are interpreted relative to the directory containing the link __lowerCAmelCase = resolved(os.path.join(UpperCamelCase , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=UpperCamelCase ) __lowerCAmelCase = resolved(UpperCamelCase ) for finfo in members: if badpath(finfo.name , UpperCamelCase ): logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' ) elif finfo.issym() and badlink(UpperCamelCase , UpperCamelCase ): logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' ) elif finfo.islnk() and badlink(UpperCamelCase , UpperCamelCase ): logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' ) else: yield finfo @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) __lowerCAmelCase = tarfile.open(UpperCamelCase ) tar_file.extractall(UpperCamelCase , members=TarExtractor.safemembers(UpperCamelCase , UpperCamelCase ) ) tar_file.close() class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x1F\x8B"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: with gzip.open(UpperCamelCase , "rb" ) as gzip_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : List[Any] = [ B"""PK\x03\x04""", B"""PK\x05\x06""", # empty archive B"""PK\x07\x08""", # spanned archive ] @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool: if super().is_extractable(UpperCamelCase , magic_number=UpperCamelCase ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. 
# From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(UpperCamelCase , "rb" ) as fp: __lowerCAmelCase = _EndRecData(UpperCamelCase ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: __lowerCAmelCase = fp.read(UpperCamelCase ) # CD is where we expect it to be if len(UpperCamelCase ) == sizeCentralDir: __lowerCAmelCase = struct.unpack(UpperCamelCase , UpperCamelCase ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) with zipfile.ZipFile(UpperCamelCase , "r" ) as zip_file: zip_file.extractall(UpperCamelCase ) zip_file.close() class UpperCAmelCase__ ( UpperCamelCase__ ): a : Tuple = [B"""\xFD\x37\x7A\x58\x5A\x00"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: with lzma.open(UpperCamelCase ) as compressed_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : str = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.RARFILE_AVAILABLE: raise ImportError("Please pip install rarfile" ) import rarfile os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) __lowerCAmelCase = rarfile.RarFile(UpperCamelCase ) rf.extractall(UpperCamelCase ) rf.close() class UpperCAmelCase__ ( UpperCamelCase__ ): a : int = [B"""\x28\xb5\x2F\xFD"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.ZSTANDARD_AVAILABLE: raise ImportError("Please pip install zstandard" ) import zstandard as zstd __lowerCAmelCase = zstd.ZstdDecompressor() with open(UpperCamelCase , "rb" ) as ifh, open(UpperCamelCase , "wb" ) as ofh: dctx.copy_stream(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x42\x5A\x68"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: with bza.open(UpperCamelCase , "rb" ) as compressed_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x37\x7A\xBC\xAF\x27\x1C"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.PY7ZR_AVAILABLE: raise ImportError("Please pip install py7zr" ) import pyazr os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) with pyazr.SevenZipFile(UpperCamelCase , "r" ) as archive: archive.extractall(UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x04\x22\x4D\x18"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.LZ4_AVAILABLE: raise ImportError("Please pip 
install lz4" ) import lza.frame with lza.frame.open(UpperCamelCase , "rb" ) as compressed_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ : # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) a : Dict[str, Type[BaseExtractor]] = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def UpperCAmelCase_ ( cls ) -> Optional[Any]: return max( len(UpperCamelCase ) for extractor in cls.extractors.values() if issubclass(UpperCamelCase , UpperCamelCase ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict: try: return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase , magic_number_length=UpperCamelCase ) except OSError: return b"" @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = False ) -> bool: warnings.warn( "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. " "Use 'infer_extractor_format' instead." , category=UpperCamelCase , ) __lowerCAmelCase = cls.infer_extractor_format(UpperCamelCase ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase ) -> str: # <Added version="2.4.0"/> __lowerCAmelCase = cls._get_magic_number_max_length() __lowerCAmelCase = cls._read_magic_number(UpperCamelCase , UpperCamelCase ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(UpperCamelCase , magic_number=UpperCamelCase ): return extractor_format @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = "deprecated" , ) -> None: os.makedirs(os.path.dirname(UpperCamelCase ) , exist_ok=UpperCamelCase ) # Prevent parallel extractions __lowerCAmelCase = str(Path(UpperCamelCase ).with_suffix(".lock" ) ) with FileLock(UpperCamelCase ): shutil.rmtree(UpperCamelCase , ignore_errors=UpperCamelCase ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(UpperCamelCase , UpperCamelCase ): # passed as positional arg warnings.warn( "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. " "Use 'extractor_format' instead." , category=UpperCamelCase , ) __lowerCAmelCase = extractor if extractor != "deprecated" else extractor_format else: __lowerCAmelCase = cls.extractors[extractor_format] return extractor.extract(UpperCamelCase , UpperCamelCase ) else: warnings.warn( "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an " "exception in 3.0.0." , category=UpperCamelCase , ) for extractor in cls.extractors.values(): if extractor.is_extractable(UpperCamelCase ): return extractor.extract(UpperCamelCase , UpperCamelCase )
39
0
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] ): '''simple docstring''' assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __lowerCAmelCase ( lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Dict ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_3": "float64", "col_1": "string", "col_2": "int64"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Any ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_3": "float64", "col_1": "string", "col_2": "int64"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowerCAmelCase = {"col_2": "int64", "col_3": "float64", "col_1": "string"} __lowerCAmelCase = features.copy() __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = tmp_path / "cache" 
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , split=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] ): '''simple docstring''' if issubclass(lowerCamelCase , lowerCamelCase ): __lowerCAmelCase = jsonl_path elif issubclass(lowerCamelCase , lowerCamelCase ): __lowerCAmelCase = [jsonl_path] __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : str=("train",) ): '''simple docstring''' assert isinstance(lowerCamelCase , lowerCamelCase ) for split in splits: __lowerCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __lowerCAmelCase ( lowerCamelCase : Dict , lowerCamelCase : int , lowerCamelCase : List[str] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read() _check_json_datasetdict(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Dict ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_datasetdict(lowerCamelCase , lowerCamelCase ) 
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : int ): '''simple docstring''' if split: __lowerCAmelCase = {split: jsonl_path} else: __lowerCAmelCase = "train" __lowerCAmelCase = {"train": jsonl_path, "test": jsonl_path} __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_datasetdict(lowerCamelCase , lowerCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __lowerCAmelCase ( lowerCamelCase : Optional[Any] ): '''simple docstring''' return json.load(lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : Tuple ): '''simple docstring''' return [json.loads(lowerCamelCase ) for line in buffer] class UpperCAmelCase__ : @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase ).write() buffer.seek(0 ) __lowerCAmelCase = load_json_function(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) assert isinstance(exported_content[0] , UpperCamelCase ) assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase ).write() buffer.seek(0 ) __lowerCAmelCase = load_json(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , num_proc=2 ).write() buffer.seek(0 ) __lowerCAmelCase = load_json_function(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) assert isinstance(exported_content[0] , UpperCamelCase ) assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> 
Union[str, Any]: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase , num_proc=2 ).write() buffer.seek(0 ) __lowerCAmelCase = load_json(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase ) == 10 def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any: with pytest.raises(UpperCamelCase ): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , num_proc=0 ) @pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple: __lowerCAmelCase = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}''' __lowerCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(UpperCamelCase , UpperCamelCase , compression=UpperCamelCase ).write() with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f: __lowerCAmelCase = f.read() with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f: __lowerCAmelCase = f.read() assert exported_content == original_content
708
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class UpperCAmelCase__ ( UpperCamelCase__ ): def __init__( self ) -> List[str]: # test for the above condition self.test() def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = 0 __lowerCAmelCase = False while not completed: if counter == 1: self.reset() __lowerCAmelCase = self.advance() if not self.does_advance(UpperCamelCase ): raise Exception( "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.update(UpperCamelCase ) counter += 1 if counter > 1_0000: raise Exception("update() does not fulfill the constraint." ) if self.remaining() != 0: raise Exception("Custom Constraint is not defined correctly." ) @abstractmethod def UpperCAmelCase_ ( self ) -> Dict: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self ) -> int: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self ) -> int: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> str: raise NotImplementedError( F'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class UpperCAmelCase__ ( UpperCamelCase__ ): def __init__( self , UpperCamelCase ) -> Dict: super(UpperCamelCase , self ).__init__() if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(UpperCamelCase , UpperCamelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) __lowerCAmelCase = token_ids __lowerCAmelCase = len(self.token_ids ) __lowerCAmelCase = -1 # the index of the currently fulfilled step __lowerCAmelCase = False def UpperCAmelCase_ ( self ) -> Optional[int]: if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def UpperCAmelCase_ ( self , UpperCamelCase ) -> str: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def UpperCAmelCase_ ( self , UpperCamelCase ) -> int: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False if self.does_advance(UpperCamelCase ): self.fulfilled_idx += 1 __lowerCAmelCase = True if self.fulfilled_idx == (self.seqlen - 1): __lowerCAmelCase = True __lowerCAmelCase = completed else: # failed to make progress. __lowerCAmelCase = True self.reset() return stepped, completed, reset def UpperCAmelCase_ ( self ) -> Union[str, Any]: __lowerCAmelCase = False __lowerCAmelCase = 0 def UpperCAmelCase_ ( self ) -> Optional[int]: return self.seqlen - (self.fulfilled_idx + 1) def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Optional[Any]: __lowerCAmelCase = PhrasalConstraint(self.token_ids ) if stateful: __lowerCAmelCase = self.seqlen __lowerCAmelCase = self.fulfilled_idx __lowerCAmelCase = self.completed return new_constraint class UpperCAmelCase__ : def __init__( self , UpperCamelCase , UpperCamelCase=True ) -> Optional[int]: __lowerCAmelCase = max([len(UpperCamelCase ) for one in nested_token_ids] ) __lowerCAmelCase = {} for token_ids in nested_token_ids: __lowerCAmelCase = root for tidx, token_id in enumerate(UpperCamelCase ): if token_id not in level: __lowerCAmelCase = {} __lowerCAmelCase = level[token_id] if no_subsets and self.has_subsets(UpperCamelCase , UpperCamelCase ): raise ValueError( "Each list in `nested_token_ids` can't be a complete subset of another list, but is" F''' {nested_token_ids}.''' ) __lowerCAmelCase = root def UpperCAmelCase_ ( self , UpperCamelCase ) -> int: __lowerCAmelCase = self.trie for current_token in current_seq: __lowerCAmelCase = start[current_token] __lowerCAmelCase = list(start.keys() ) return next_tokens def UpperCAmelCase_ ( self , UpperCamelCase ) -> str: __lowerCAmelCase = self.next_tokens(UpperCamelCase ) return len(UpperCamelCase ) == 0 def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]: __lowerCAmelCase = list(root.values() ) if len(UpperCamelCase ) == 0: return 1 else: return sum([self.count_leaves(UpperCamelCase ) for nn in next_nodes] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> Optional[Any]: __lowerCAmelCase = self.count_leaves(UpperCamelCase ) return len(UpperCamelCase ) != leaf_count 
class UpperCAmelCase__ ( UpperCamelCase__ ): def __init__( self , UpperCamelCase ) -> List[Any]: super(UpperCamelCase , self ).__init__() if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(UpperCamelCase , UpperCamelCase ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(UpperCamelCase , UpperCamelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) __lowerCAmelCase = DisjunctiveTrie(UpperCamelCase ) __lowerCAmelCase = nested_token_ids __lowerCAmelCase = self.trie.max_height __lowerCAmelCase = [] __lowerCAmelCase = False def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase = self.trie.next_tokens(self.current_seq ) if len(UpperCamelCase ) == 0: return None else: return token_list def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[str]: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' ) __lowerCAmelCase = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def UpperCAmelCase_ ( self , UpperCamelCase ) -> int: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False if self.does_advance(UpperCamelCase ): self.current_seq.append(UpperCamelCase ) __lowerCAmelCase = True else: __lowerCAmelCase = True self.reset() __lowerCAmelCase = self.trie.reached_leaf(self.current_seq ) __lowerCAmelCase = completed return stepped, completed, reset def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = False __lowerCAmelCase = [] def UpperCAmelCase_ ( self ) -> int: if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Union[str, Any]: __lowerCAmelCase = DisjunctiveConstraint(self.token_ids ) if stateful: __lowerCAmelCase = self.seqlen __lowerCAmelCase = self.current_seq __lowerCAmelCase = self.completed return new_constraint class UpperCAmelCase__ : def __init__( self , UpperCamelCase ) -> Union[str, Any]: __lowerCAmelCase = constraints # max # of steps required to fulfill a given constraint __lowerCAmelCase = max([c.seqlen for c in constraints] ) __lowerCAmelCase = len(UpperCamelCase ) __lowerCAmelCase = False self.init_state() def UpperCAmelCase_ ( self ) -> Union[str, Any]: __lowerCAmelCase = [] __lowerCAmelCase = None __lowerCAmelCase = [constraint.copy(stateful=UpperCamelCase ) for constraint in self.constraints] def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def UpperCAmelCase_ ( self ) -> List[str]: __lowerCAmelCase = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" __lowerCAmelCase = constraint.advance() if 
isinstance(UpperCamelCase , UpperCamelCase ): token_list.append(UpperCamelCase ) elif isinstance(UpperCamelCase , UpperCamelCase ): token_list.extend(UpperCamelCase ) else: __lowerCAmelCase = self.inprogress_constraint.advance() if isinstance(UpperCamelCase , UpperCamelCase ): token_list.append(UpperCamelCase ) elif isinstance(UpperCamelCase , UpperCamelCase ): token_list.extend(UpperCamelCase ) if len(UpperCamelCase ) == 0: return None else: return token_list def UpperCAmelCase_ ( self , UpperCamelCase ) -> int: self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint __lowerCAmelCase , __lowerCAmelCase = self.add(UpperCamelCase ) # the entire list of constraints are fulfilled if self.completed: break def UpperCAmelCase_ ( self , UpperCamelCase ) -> Dict: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) __lowerCAmelCase , __lowerCAmelCase = False, False if self.completed: __lowerCAmelCase = True __lowerCAmelCase = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.inprogress_constraint.update(UpperCamelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCamelCase ) ) __lowerCAmelCase = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) __lowerCAmelCase = None if len(self.pending_constraints ) == 0: # we're done! __lowerCAmelCase = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(UpperCamelCase ): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = pending_constraint.update(UpperCamelCase ) if not stepped: raise Exception( "`constraint.update(token_id)` is not yielding incremental progress, " "even though `constraint.does_advance(token_id)` is true." ) if complete: self.complete_constraints.append(UpperCamelCase ) __lowerCAmelCase = None if not complete and stepped: __lowerCAmelCase = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". __lowerCAmelCase = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. __lowerCAmelCase = True break # prevent accidentally stepping through multiple constraints with just one token. 
return complete, stepped def UpperCAmelCase_ ( self , UpperCamelCase=True ) -> str: __lowerCAmelCase = ConstraintListState(self.constraints ) # we actually never touch self.constraints objects # throughout this process. So it's at initialization state. if stateful: __lowerCAmelCase = [ constraint.copy(stateful=UpperCamelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: __lowerCAmelCase = self.inprogress_constraint.copy(stateful=UpperCamelCase ) __lowerCAmelCase = [constraint.copy() for constraint in self.pending_constraints] return new_state
39
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : str = logging.get_logger(__name__) lowerCAmelCase : Optional[int] = { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''', } class UpperCAmelCase__ ( UpperCamelCase__ ): a : Dict = """gpt_neox_japanese""" def __init__( self , UpperCamelCase=3_2000 , UpperCamelCase=2560 , UpperCamelCase=32 , UpperCamelCase=32 , UpperCamelCase=4 , UpperCamelCase="gelu" , UpperCamelCase=1.00 , UpperCamelCase=1_0000 , UpperCamelCase=2048 , UpperCamelCase=0.02 , UpperCamelCase=1E-5 , UpperCamelCase=True , UpperCamelCase=3_1996 , UpperCamelCase=3_1999 , UpperCamelCase=0.1 , UpperCamelCase=0.0 , **UpperCamelCase , ) -> Any: super().__init__(bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase ) __lowerCAmelCase = vocab_size __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_multiple_size __lowerCAmelCase = hidden_act __lowerCAmelCase = rotary_pct __lowerCAmelCase = rotary_emb_base __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = use_cache __lowerCAmelCase = attention_dropout __lowerCAmelCase = hidden_dropout
709
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ): a : List[Any] = KandinskyImgaImgPipeline a : Union[str, Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""] a : List[Any] = [ """prompt""", """negative_prompt""", """image_embeds""", """negative_image_embeds""", """image""", ] a : Any = [ """generator""", """height""", """width""", """strength""", """guidance_scale""", """negative_prompt""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] a : Union[str, Any] = False @property def UpperCAmelCase_ ( self ) -> int: return 32 @property def UpperCAmelCase_ ( self ) -> List[str]: return 32 @property def UpperCAmelCase_ ( self ) -> Dict: return self.time_input_dim @property def UpperCAmelCase_ ( self ) -> int: return self.time_input_dim * 4 @property def UpperCAmelCase_ ( self ) -> int: return 100 @property def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def UpperCAmelCase_ ( self ) -> Union[str, Any]: torch.manual_seed(0 ) __lowerCAmelCase = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) __lowerCAmelCase = MultilingualCLIP(UpperCamelCase ) __lowerCAmelCase = text_encoder.eval() return text_encoder @property def UpperCAmelCase_ ( self ) -> List[str]: torch.manual_seed(0 ) __lowerCAmelCase = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } __lowerCAmelCase = UNetaDConditionModel(**UpperCamelCase ) return model @property def UpperCAmelCase_ ( self ) -> List[Any]: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def UpperCAmelCase_ ( self ) -> Dict: torch.manual_seed(0 ) 
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def UpperCAmelCase_ ( self ) -> Any: __lowerCAmelCase = self.dummy_text_encoder __lowerCAmelCase = self.dummy_tokenizer __lowerCAmelCase = self.dummy_unet __lowerCAmelCase = self.dummy_movq __lowerCAmelCase = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_00_85, "beta_end": 0.0_12, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } __lowerCAmelCase = DDIMScheduler(**UpperCamelCase ) __lowerCAmelCase = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=0 ) -> Optional[Any]: __lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) __lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase ) # create init_image __lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) __lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCAmelCase = Image.fromarray(np.uinta(UpperCamelCase ) ).convert("RGB" ).resize((256, 256) ) if str(UpperCamelCase ).startswith("mps" ): __lowerCAmelCase = torch.manual_seed(UpperCamelCase ) else: __lowerCAmelCase = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) __lowerCAmelCase = { "prompt": "horse", "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def UpperCAmelCase_ ( self ) -> Tuple: __lowerCAmelCase = "cpu" __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = self.pipeline_class(**UpperCamelCase ) __lowerCAmelCase = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) __lowerCAmelCase = pipe(**self.get_dummy_inputs(UpperCamelCase ) ) __lowerCAmelCase = output.images __lowerCAmelCase = pipe( **self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0] __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = np.array( [0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class UpperCAmelCase__ ( unittest.TestCase ): def UpperCAmelCase_ ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_img2img_frog.npy" ) __lowerCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) __lowerCAmelCase = "A red cartoon frog, 4k" __lowerCAmelCase = KandinskyPriorPipeline.from_pretrained( 
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(UpperCamelCase ) __lowerCAmelCase = KandinskyImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa ) __lowerCAmelCase = pipeline.to(UpperCamelCase ) pipeline.set_progress_bar_config(disable=UpperCamelCase ) __lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowerCAmelCase , __lowerCAmelCase = pipe_prior( UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() __lowerCAmelCase = pipeline( UpperCamelCase , image=UpperCamelCase , image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , ) __lowerCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
39
0
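The test above gets deterministic outputs by seeding both the dummy tensor contents and the diffusion generator. A minimal sketch of that pattern, with illustrative shapes and seeds (these names are stand-ins, not the pipeline's own API):

import numpy as np
import torch
from PIL import Image

# Seeded tensor contents: the same seed yields the same dummy image every run
rng = torch.Generator().manual_seed(0)
image = torch.rand((1, 3, 64, 64), generator=rng)

# CHW -> HWC, scale to uint8, then to PIL, as the test's input builder does
array = (image.permute(0, 2, 3, 1)[0].numpy() * 255).astype(np.uint8)
init_image = Image.fromarray(array).convert("RGB").resize((256, 256))

# A separately seeded generator makes the denoising loop reproducible too
generator = torch.Generator(device="cpu").manual_seed(0)
print(init_image.size, generator.initial_seed())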
'''simple docstring''' def __lowerCAmelCase ( lowerCamelCase : list[int] , lowerCamelCase : list[int] , lowerCamelCase : int ): '''simple docstring''' return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(lowerCamelCase ) ) def __lowerCAmelCase ( lowerCamelCase : list[list[int]] , lowerCamelCase : int , lowerCamelCase : list[int] , lowerCamelCase : int ): '''simple docstring''' if index == len(lowerCamelCase ): return True # Recursive Step for i in range(lowerCamelCase ): if valid_coloring(graph[index] , lowerCamelCase , lowerCamelCase ): # Color current vertex __lowerCAmelCase = i # Validate coloring if util_color(lowerCamelCase , lowerCamelCase , lowerCamelCase , index + 1 ): return True # Backtrack __lowerCAmelCase = -1 return False def __lowerCAmelCase ( lowerCamelCase : list[list[int]] , lowerCamelCase : int ): '''simple docstring''' __lowerCAmelCase = [-1] * len(lowerCamelCase ) if util_color(lowerCamelCase , lowerCamelCase , lowerCamelCase , 0 ): return colored_vertices return []
710
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') lowerCAmelCase : Any = logging.getLogger(__name__) @dataclass class UpperCAmelCase__ : a : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a : bool = field( default=UpperCamelCase__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) a : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) a : bool = field( default=UpperCamelCase__ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) @dataclass class UpperCAmelCase__ : a : Optional[str] = field(default=UpperCamelCase__ , metadata={"""help""": """The input training data file (a text file)."""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) a : bool = field( default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) a : Optional[int] = field( default=UpperCamelCase__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) a : Optional[int] = field( default=UpperCamelCase__ , metadata={ """help""": ( """The maximum total input sequence length after tokenization. If passed, sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a : bool = field( default=UpperCamelCase__ , metadata={ """help""": ( """Whether to pad all samples to the maximum sentence length. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch. 
More """ """efficient on GPU but very bad for TPU.""" ) } , ) a : Optional[int] = field( default=UpperCamelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) a : Optional[int] = field( default=UpperCamelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def UpperCAmelCase_ ( self ) -> Tuple: if self.train_file is not None: __lowerCAmelCase = self.train_file.split("." )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: __lowerCAmelCase = self.validation_file.split("." )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class UpperCAmelCase__ : a : PreTrainedTokenizerBase a : Union[bool, str, PaddingStrategy] = True a : Optional[int] = None a : Optional[int] = None def __call__( self , UpperCamelCase ) -> Optional[int]: __lowerCAmelCase = "label" if "label" in features[0].keys() else "labels" __lowerCAmelCase = [feature.pop(UpperCamelCase ) for feature in features] __lowerCAmelCase = len(UpperCamelCase ) __lowerCAmelCase = len(features[0]["input_ids"] ) __lowerCAmelCase = [ [{k: v[i] for k, v in feature.items()} for i in range(UpperCamelCase )] for feature in features ] __lowerCAmelCase = list(chain(*UpperCamelCase ) ) __lowerCAmelCase = self.tokenizer.pad( UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten __lowerCAmelCase = {k: v.view(UpperCamelCase , UpperCamelCase , -1 ) for k, v in batch.items()} # Add back labels __lowerCAmelCase = torch.tensor(UpperCamelCase , dtype=torch.intaa ) return batch def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag" , lowerCamelCase , lowerCamelCase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() __lowerCAmelCase = training_args.get_process_log_level() logger.setLevel(lowerCamelCase ) datasets.utils.logging.set_verbosity(lowerCamelCase ) transformers.utils.logging.set_verbosity(lowerCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __lowerCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: __lowerCAmelCase = {} if data_args.train_file is not None: __lowerCAmelCase = data_args.train_file if data_args.validation_file is not None: __lowerCAmelCase = data_args.validation_file __lowerCAmelCase = data_args.train_file.split("." )[-1] __lowerCAmelCase = load_dataset( lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. __lowerCAmelCase = load_dataset( "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCAmelCase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. __lowerCAmelCase = [f'''ending{i}''' for i in range(4 )] __lowerCAmelCase = "sent1" __lowerCAmelCase = "sent2" if data_args.max_seq_length is None: __lowerCAmelCase = tokenizer.model_max_length if max_seq_length > 10_24: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) __lowerCAmelCase = 10_24 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) __lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(lowerCamelCase : Tuple ): __lowerCAmelCase = [[context] * 4 for context in examples[context_name]] __lowerCAmelCase = examples[question_header_name] __lowerCAmelCase = [ [f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase ) ] # Flatten out __lowerCAmelCase = list(chain(*lowerCamelCase ) ) __lowerCAmelCase = list(chain(*lowerCamelCase ) ) # Tokenize __lowerCAmelCase = tokenizer( lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="max_length" if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) __lowerCAmelCase = raw_datasets["train"] if data_args.max_train_samples is not None: __lowerCAmelCase = min(len(lowerCamelCase ) , data_args.max_train_samples ) __lowerCAmelCase = train_dataset.select(range(lowerCamelCase ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): __lowerCAmelCase = train_dataset.map( lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) __lowerCAmelCase = raw_datasets["validation"] if data_args.max_eval_samples is not None: __lowerCAmelCase = min(len(lowerCamelCase ) , data_args.max_eval_samples ) __lowerCAmelCase = eval_dataset.select(range(lowerCamelCase ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): __lowerCAmelCase = eval_dataset.map( lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator __lowerCAmelCase = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(lowerCamelCase : Dict ): __lowerCAmelCase , __lowerCAmelCase = eval_predictions __lowerCAmelCase = np.argmax(lowerCamelCase , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer __lowerCAmelCase = Trainer( model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , ) # Training if training_args.do_train: __lowerCAmelCase = None if training_args.resume_from_checkpoint is not None: __lowerCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: __lowerCAmelCase = last_checkpoint __lowerCAmelCase = trainer.train(resume_from_checkpoint=lowerCamelCase ) trainer.save_model() # Saves the tokenizer too for easy upload __lowerCAmelCase = train_result.metrics __lowerCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase ) ) __lowerCAmelCase = min(lowerCamelCase , len(lowerCamelCase ) ) trainer.log_metrics("train" , lowerCamelCase ) trainer.save_metrics("train" , lowerCamelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) __lowerCAmelCase = 
trainer.evaluate() __lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase ) __lowerCAmelCase = min(lowerCamelCase , len(lowerCamelCase ) ) trainer.log_metrics("eval" , lowerCamelCase ) trainer.save_metrics("eval" , lowerCamelCase ) __lowerCAmelCase = { "finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice", "dataset_tags": "swag", "dataset_args": "regular", "dataset": "SWAG", "language": "en", } if training_args.push_to_hub: trainer.push_to_hub(**lowerCamelCase ) else: trainer.create_model_card(**lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : Tuple ): '''simple docstring''' main() if __name__ == "__main__": main()
39
0
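For reference, a self-contained rendition of the backtracking m-coloring routine above, run on a small 3-colorable graph (identifier names here are readable stand-ins for the ones used above):

from __future__ import annotations

def valid_coloring(neighbours: list[int], colors: list[int], color: int) -> bool:
    # A color is usable if no already-colored neighbour shares it
    return not any(n == 1 and colors[i] == color for i, n in enumerate(neighbours))

def util_color(graph: list[list[int]], max_colors: int, colors: list[int], index: int) -> bool:
    if index == len(graph):          # every vertex colored
        return True
    for c in range(max_colors):
        if valid_coloring(graph[index], colors, c):
            colors[index] = c        # tentatively color this vertex
            if util_color(graph, max_colors, colors, index + 1):
                return True
            colors[index] = -1       # backtrack
    return False

def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colors = [-1] * len(graph)
    return colors if util_color(graph, max_colors, colors, 0) else []

# Adjacency matrix of a 5-vertex graph that is 3-colorable
graph = [[0, 1, 0, 0, 0],
         [1, 0, 1, 0, 1],
         [0, 1, 0, 1, 0],
         [0, 0, 1, 0, 1],
         [0, 1, 0, 1, 0]]
print(color(graph, 3))  # [0, 1, 0, 1, 0]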
'''simple docstring''' from itertools import product def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : int ): '''simple docstring''' __lowerCAmelCase = sides_number __lowerCAmelCase = max_face_number * dice_number __lowerCAmelCase = [0] * (max_total + 1) __lowerCAmelCase = 1 __lowerCAmelCase = range(lowerCamelCase , max_face_number + 1 ) for dice_numbers in product(lowerCamelCase , repeat=lowerCamelCase ): __lowerCAmelCase = sum(lowerCamelCase ) totals_frequencies[total] += 1 return totals_frequencies def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = total_frequency_distribution( sides_number=4 , dice_number=9 ) __lowerCAmelCase = total_frequency_distribution( sides_number=6 , dice_number=6 ) __lowerCAmelCase = 0 __lowerCAmelCase = 9 __lowerCAmelCase = 4 * 9 __lowerCAmelCase = 6 for peter_total in range(lowerCamelCase , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) __lowerCAmelCase = (4**9) * (6**6) __lowerCAmelCase = peter_wins_count / total_games_number __lowerCAmelCase = round(lowerCamelCase , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(f'{solution() = }')
711
'''simple docstring''' # flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter lowerCAmelCase : List[str] = logging.get_logger(__name__) lowerCAmelCase : Dict[Optional[str], Type[Formatter]] = {} lowerCAmelCase : Dict[Optional[str], str] = {} lowerCAmelCase : Dict[Optional[str], Exception] = {} def __lowerCAmelCase ( lowerCamelCase : type , lowerCamelCase : Optional[str] , lowerCamelCase : Optional[List[str]] = None , ): '''simple docstring''' __lowerCAmelCase = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' ) __lowerCAmelCase = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' ) __lowerCAmelCase = format_type def __lowerCAmelCase ( lowerCamelCase : Exception , lowerCamelCase : Optional[str] , lowerCamelCase : Optional[List[str]] = None ): '''simple docstring''' __lowerCAmelCase = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): __lowerCAmelCase = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['''python''']) _register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow''']) _register_formatter(NumpyFormatter, '''numpy''', aliases=['''np''']) _register_formatter(PandasFormatter, '''pandas''', aliases=['''pd''']) _register_formatter(CustomFormatter, '''custom''') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch''']) else: lowerCAmelCase : Optional[int] = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''') _register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch''']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf''']) else: lowerCAmelCase : str = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''') _register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf''']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, '''jax''', aliases=[]) else: lowerCAmelCase : Any = ValueError('''JAX needs to be installed to be able to return JAX arrays.''') _register_unavailable_formatter(_jax_error, '''jax''', aliases=[]) def __lowerCAmelCase ( lowerCamelCase : Optional[str] ): '''simple docstring''' if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def __lowerCAmelCase ( lowerCamelCase : Optional[str] , **lowerCamelCase : Tuple ): '''simple docstring''' __lowerCAmelCase = get_format_type_from_alias(lowerCamelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**lowerCamelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f'''Return type should be None or selected 
in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
39
0
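A compact sketch of the same Project Euler 205 computation (the probability that nine four-sided dice out-total six six-sided dice), using collections.Counter in place of the frequency list:

from collections import Counter
from itertools import product

def total_counts(sides: int, dice: int) -> Counter:
    # Frequency of every possible total for `dice` n-sided dice
    return Counter(sum(roll) for roll in product(range(1, sides + 1), repeat=dice))

peter = total_counts(4, 9)   # pyramidal dice
colin = total_counts(6, 6)   # cubic dice

wins = sum(p_freq * c_freq
           for p_total, p_freq in peter.items()
           for c_total, c_freq in colin.items()
           if p_total > c_total)

print(round(wins / (4**9 * 6**6), 7))  # 0.5731441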
'''simple docstring''' from math import factorial lowerCAmelCase : dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)} def __lowerCAmelCase ( lowerCamelCase : int ): '''simple docstring''' if not isinstance(lowerCamelCase , lowerCamelCase ): raise TypeError("Parameter number must be int" ) if number < 0: raise ValueError("Parameter number must be greater than or equal to 0" ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(lowerCamelCase ) ) def __lowerCAmelCase ( lowerCamelCase : int = 60 , lowerCamelCase : int = 1_00_00_00 ): '''simple docstring''' if not isinstance(lowerCamelCase , lowerCamelCase ) or not isinstance(lowerCamelCase , lowerCamelCase ): raise TypeError("Parameters chain_length and number_limit must be int" ) if chain_length <= 0 or number_limit <= 0: raise ValueError( "Parameters chain_length and number_limit must be greater than 0" ) # the counter for the chains with the exact desired length __lowerCAmelCase = 0 # the cached sizes of the previous chains __lowerCAmelCase = {} for start_chain_element in range(1 , lowerCamelCase ): # The temporary set will contain the elements of the chain __lowerCAmelCase = set() __lowerCAmelCase = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. __lowerCAmelCase = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(lowerCamelCase ) chain_set_length += 1 __lowerCAmelCase = digit_factorial_sum(lowerCamelCase ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] __lowerCAmelCase = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(f'{solution()}')
712
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __lowerCAmelCase ( lowerCamelCase : Any ): '''simple docstring''' __lowerCAmelCase = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2] __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: __lowerCAmelCase = [3, 3, 3, 3] __lowerCAmelCase = [5, 5, 5, 5] elif "fl4" in model_name: __lowerCAmelCase = [4, 4, 4, 4] __lowerCAmelCase = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: __lowerCAmelCase = [3, 3, 3, 3] if "lrf" in model_name: __lowerCAmelCase = [3, 3, 3, 3] else: __lowerCAmelCase = [2, 2, 2, 2] if "tiny" in model_name: __lowerCAmelCase = 96 elif "small" in model_name: __lowerCAmelCase = 96 elif "base" in model_name: __lowerCAmelCase = 1_28 elif "large" in model_name: __lowerCAmelCase = 1_92 elif "xlarge" in model_name: __lowerCAmelCase = 2_56 elif "huge" in model_name: __lowerCAmelCase = 3_52 # set label information __lowerCAmelCase = "huggingface/label-files" if "large" in model_name or "huge" in model_name: __lowerCAmelCase = "imagenet-22k-id2label.json" else: __lowerCAmelCase = "imagenet-1k-id2label.json" __lowerCAmelCase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="dataset" ) , "r" ) ) __lowerCAmelCase = {int(lowerCamelCase ): v for k, v in idalabel.items()} __lowerCAmelCase = {v: k for k, v in idalabel.items()} __lowerCAmelCase = FocalNetConfig( embed_dim=lowerCamelCase , depths=lowerCamelCase , focal_levels=lowerCamelCase , focal_windows=lowerCamelCase , use_conv_embed=lowerCamelCase , idalabel=lowerCamelCase , labelaid=lowerCamelCase , use_post_layernorm=lowerCamelCase , use_layerscale=lowerCamelCase , ) return config def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ): '''simple docstring''' if "patch_embed.proj" in name: __lowerCAmelCase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: __lowerCAmelCase = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: __lowerCAmelCase = "encoder." 
+ name if "encoder.layers" in name: __lowerCAmelCase = name.replace("encoder.layers" , "encoder.stages" ) if "downsample.proj" in name: __lowerCAmelCase = name.replace("downsample.proj" , "downsample.projection" ) if "blocks" in name: __lowerCAmelCase = name.replace("blocks" , "layers" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: __lowerCAmelCase = name.replace("modulation.f" , "modulation.projection_in" ) if "modulation.h.weight" in name or "modulation.h.bias" in name: __lowerCAmelCase = name.replace("modulation.h" , "modulation.projection_context" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: __lowerCAmelCase = name.replace("modulation.proj" , "modulation.projection_out" ) if name == "norm.weight": __lowerCAmelCase = "layernorm.weight" if name == "norm.bias": __lowerCAmelCase = "layernorm.bias" if "head" in name: __lowerCAmelCase = name.replace("head" , "classifier" ) else: __lowerCAmelCase = "focalnet." + name return name def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Union[str, Any]=False ): '''simple docstring''' __lowerCAmelCase = { "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth", "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth", "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth", "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth", "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth", "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth", "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth", "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth", "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth", "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth", } # fmt: on __lowerCAmelCase = model_name_to_url[model_name] print("Checkpoint URL: " , lowerCamelCase ) __lowerCAmelCase = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location="cpu" )["model"] # rename keys for key in state_dict.copy().keys(): __lowerCAmelCase = state_dict.pop(lowerCamelCase ) __lowerCAmelCase = val __lowerCAmelCase = get_focalnet_config(lowerCamelCase ) __lowerCAmelCase = FocalNetForImageClassification(lowerCamelCase ) model.eval() # load state dict model.load_state_dict(lowerCamelCase ) # verify conversion __lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowerCAmelCase = BitImageProcessor( do_resize=lowerCamelCase , size={"shortest_edge": 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase , crop_size=2_24 , do_normalize=lowerCamelCase , image_mean=lowerCamelCase , image_std=lowerCamelCase , ) __lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ) __lowerCAmelCase = processor(images=lowerCamelCase , return_tensors="pt" ) __lowerCAmelCase = transforms.Compose( [ transforms.Resize(2_56 ), transforms.CenterCrop(2_24 ), 
transforms.ToTensor(), transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) __lowerCAmelCase = image_transforms(lowerCamelCase ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , lowerCamelCase , atol=1e-4 ) __lowerCAmelCase = model(**lowerCamelCase ) __lowerCAmelCase = outputs.logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) print("First values of logits:" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": __lowerCAmelCase = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ) elif model_name == "focalnet-tiny-lrf": __lowerCAmelCase = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] ) elif model_name == "focalnet-small": __lowerCAmelCase = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] ) elif model_name == "focalnet-small-lrf": __lowerCAmelCase = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] ) elif model_name == "focalnet-base": __lowerCAmelCase = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] ) elif model_name == "focalnet-base-lrf": __lowerCAmelCase = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] ) assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase ) processor.save_pretrained(lowerCamelCase ) if push_to_hub: print(f'''Pushing model and processor of {model_name} to the hub...''' ) model.push_to_hub(f'''{model_name}''' ) processor.push_to_hub(f'''{model_name}''' ) if __name__ == "__main__": lowerCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''focalnet-tiny''', type=str, help='''Name of the FocalNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub.''', ) lowerCAmelCase : Optional[int] = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
39
0
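A short sketch of a single digit-factorial chain, the quantity the counting loop above accumulates; 69 is the classic five-term example:

from math import factorial

FACT = {str(d): factorial(d) for d in range(10)}

def digit_factorial_sum(n: int) -> int:
    return sum(FACT[d] for d in str(n))

def chain_length(start: int) -> int:
    # Walk until the first repeated value; the set holds the non-repeating prefix
    seen, n = set(), start
    while n not in seen:
        seen.add(n)
        n = digit_factorial_sum(n)
    return len(seen)

print(chain_length(69))  # 5: 69 -> 363600 -> 1454 -> 169 -> 363601 -> 1454 ...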
'''simple docstring''' from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : float = 1 / sqrt(2 ) ): '''simple docstring''' __lowerCAmelCase = tau * frequency / samplerate __lowerCAmelCase = sin(lowerCamelCase ) __lowerCAmelCase = cos(lowerCamelCase ) __lowerCAmelCase = _sin / (2 * q_factor) __lowerCAmelCase = (1 - _cos) / 2 __lowerCAmelCase = 1 - _cos __lowerCAmelCase = 1 + alpha __lowerCAmelCase = -2 * _cos __lowerCAmelCase = 1 - alpha __lowerCAmelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : float = 1 / sqrt(2 ) ): '''simple docstring''' __lowerCAmelCase = tau * frequency / samplerate __lowerCAmelCase = sin(lowerCamelCase ) __lowerCAmelCase = cos(lowerCamelCase ) __lowerCAmelCase = _sin / (2 * q_factor) __lowerCAmelCase = (1 + _cos) / 2 __lowerCAmelCase = -1 - _cos __lowerCAmelCase = 1 + alpha __lowerCAmelCase = -2 * _cos __lowerCAmelCase = 1 - alpha __lowerCAmelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : float = 1 / sqrt(2 ) ): '''simple docstring''' __lowerCAmelCase = tau * frequency / samplerate __lowerCAmelCase = sin(lowerCamelCase ) __lowerCAmelCase = cos(lowerCamelCase ) __lowerCAmelCase = _sin / (2 * q_factor) __lowerCAmelCase = _sin / 2 __lowerCAmelCase = 0 __lowerCAmelCase = -ba __lowerCAmelCase = 1 + alpha __lowerCAmelCase = -2 * _cos __lowerCAmelCase = 1 - alpha __lowerCAmelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : float = 1 / sqrt(2 ) ): '''simple docstring''' __lowerCAmelCase = tau * frequency / samplerate __lowerCAmelCase = sin(lowerCamelCase ) __lowerCAmelCase = cos(lowerCamelCase ) __lowerCAmelCase = _sin / (2 * q_factor) __lowerCAmelCase = 1 - alpha __lowerCAmelCase = -2 * _cos __lowerCAmelCase = 1 + alpha __lowerCAmelCase = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : float = 1 / sqrt(2 ) , ): '''simple docstring''' __lowerCAmelCase = tau * frequency / samplerate __lowerCAmelCase = sin(lowerCamelCase ) __lowerCAmelCase = cos(lowerCamelCase ) __lowerCAmelCase = _sin / (2 * q_factor) __lowerCAmelCase = 10 ** (gain_db / 40) __lowerCAmelCase = 1 + alpha * big_a __lowerCAmelCase = -2 * _cos __lowerCAmelCase = 1 - alpha * big_a __lowerCAmelCase = 1 + alpha / big_a __lowerCAmelCase = -2 * _cos __lowerCAmelCase = 1 - alpha / big_a __lowerCAmelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : float = 1 / sqrt(2 ) , ): '''simple docstring''' __lowerCAmelCase = tau * frequency / samplerate __lowerCAmelCase = sin(lowerCamelCase ) __lowerCAmelCase = cos(lowerCamelCase ) __lowerCAmelCase = _sin / (2 * q_factor) __lowerCAmelCase = 10 ** (gain_db / 40) __lowerCAmelCase = (big_a + 1) - (big_a - 1) * _cos __lowerCAmelCase = (big_a + 1) + (big_a - 1) * _cos __lowerCAmelCase = (big_a - 1) - (big_a + 1) * _cos __lowerCAmelCase = (big_a - 1) + (big_a + 1) * _cos __lowerCAmelCase = 2 * sqrt(lowerCamelCase ) * alpha 
__lowerCAmelCase = big_a * (pmc + aaa) __lowerCAmelCase = 2 * big_a * mpc __lowerCAmelCase = big_a * (pmc - aaa) __lowerCAmelCase = ppmc + aaa __lowerCAmelCase = -2 * pmpc __lowerCAmelCase = ppmc - aaa __lowerCAmelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : float = 1 / sqrt(2 ) , ): '''simple docstring''' __lowerCAmelCase = tau * frequency / samplerate __lowerCAmelCase = sin(lowerCamelCase ) __lowerCAmelCase = cos(lowerCamelCase ) __lowerCAmelCase = _sin / (2 * q_factor) __lowerCAmelCase = 10 ** (gain_db / 40) __lowerCAmelCase = (big_a + 1) - (big_a - 1) * _cos __lowerCAmelCase = (big_a + 1) + (big_a - 1) * _cos __lowerCAmelCase = (big_a - 1) - (big_a + 1) * _cos __lowerCAmelCase = (big_a - 1) + (big_a + 1) * _cos __lowerCAmelCase = 2 * sqrt(lowerCamelCase ) * alpha __lowerCAmelCase = big_a * (ppmc + aaa) __lowerCAmelCase = -2 * big_a * pmpc __lowerCAmelCase = big_a * (ppmc - aaa) __lowerCAmelCase = pmc + aaa __lowerCAmelCase = 2 * mpc __lowerCAmelCase = pmc - aaa __lowerCAmelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
713
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase : str = { '''vocab_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt''' ), '''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''', '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase : Optional[Any] = { '''squeezebert/squeezebert-uncased''': 5_1_2, '''squeezebert/squeezebert-mnli''': 5_1_2, '''squeezebert/squeezebert-mnli-headless''': 5_1_2, } lowerCAmelCase : Tuple = { '''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True}, } class UpperCAmelCase__ ( UpperCamelCase__ ): a : Dict = VOCAB_FILES_NAMES a : Any = PRETRAINED_VOCAB_FILES_MAP a : Dict = PRETRAINED_INIT_CONFIGURATION a : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a : Optional[Any] = SqueezeBertTokenizer def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase="[UNK]" , UpperCamelCase="[SEP]" , UpperCamelCase="[PAD]" , UpperCamelCase="[CLS]" , UpperCamelCase="[MASK]" , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ) -> List[Any]: super().__init__( UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , ) __lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , UpperCamelCase ) != do_lower_case or normalizer_state.get("strip_accents" , UpperCamelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCamelCase ) != tokenize_chinese_chars ): __lowerCAmelCase = getattr(UpperCamelCase , normalizer_state.pop("type" ) ) __lowerCAmelCase = do_lower_case __lowerCAmelCase = strip_accents __lowerCAmelCase = tokenize_chinese_chars __lowerCAmelCase = normalizer_class(**UpperCamelCase ) __lowerCAmelCase = do_lower_case def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=None ) -> str: __lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]: __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = 
[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> Tuple[str]: __lowerCAmelCase = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase ) return tuple(UpperCamelCase )
39
0
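The coefficient formulas above follow the standard audio EQ cookbook biquads. A minimal sketch, assuming a plain direct-form I loop in place of the IIRFilter class, that builds the low-pass coefficients the same way and applies them to a short signal:

from math import cos, sin, sqrt, tau

def lowpass_coefficients(frequency: float, samplerate: int, q: float = 1 / sqrt(2)):
    w0 = tau * frequency / samplerate
    alpha = sin(w0) / (2 * q)
    b1 = 1 - cos(w0)
    b0 = b2 = b1 / 2
    a0, a1, a2 = 1 + alpha, -2 * cos(w0), 1 - alpha
    # Normalize by a0 so the recursion below needs only a1 and a2
    return [b / a0 for b in (b0, b1, b2)], [1.0, a1 / a0, a2 / a0]

def biquad(samples, b, a):
    # Direct form I: y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]
    x1 = x2 = y1 = y2 = 0.0
    for x in samples:
        y = b[0] * x + b[1] * x1 + b[2] * x2 - a[1] * y1 - a[2] * y2
        x1, x2, y1, y2 = x, x1, y, y1
        yield y

b, a = lowpass_coefficients(1_000, 48_000)
out = list(biquad([0.0, 1.0, 0.0, 0.0, 0.0], b, a))  # head of the impulse response
print([round(v, 6) for v in out])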
'''simple docstring''' from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) lowerCAmelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name lowerCAmelCase : Tuple = ''' Examples: ```py >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") >>> pipe_prior.to("cuda") >>> prompt = "red cat, 4k photo" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> zero_image_emb = out.negative_image_embeds >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") >>> pipe.to("cuda") >>> image = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=50, ... ).images >>> image[0].save("cat.png") ``` ''' def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : str , lowerCamelCase : Union[str, Any]=8 ): '''simple docstring''' __lowerCAmelCase = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 __lowerCAmelCase = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class UpperCAmelCase__ ( UpperCamelCase__ ): def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> Union[str, Any]: super().__init__() self.register_modules( unet=UpperCamelCase , scheduler=UpperCamelCase , movq=UpperCamelCase , ) __lowerCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple: if latents is None: __lowerCAmelCase = randn_tensor(UpperCamelCase , generator=UpperCamelCase , device=UpperCamelCase , dtype=UpperCamelCase ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) __lowerCAmelCase = latents.to(UpperCamelCase ) __lowerCAmelCase = latents * scheduler.init_noise_sigma return latents def UpperCAmelCase_ ( self , UpperCamelCase=0 ) -> Any: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) __lowerCAmelCase = torch.device(F'''cuda:{gpu_id}''' ) __lowerCAmelCase = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCamelCase , UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase=0 ) -> Union[str, Any]: if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." 
) __lowerCAmelCase = torch.device(F'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=UpperCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) __lowerCAmelCase = None for cpu_offloaded_model in [self.unet, self.movq]: __lowerCAmelCase , __lowerCAmelCase = cpu_offload_with_hook(UpperCamelCase , UpperCamelCase , prev_module_hook=UpperCamelCase ) # We'll offload the last model manually. __lowerCAmelCase = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def UpperCAmelCase_ ( self ) -> Union[str, Any]: if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(UpperCamelCase , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(UpperCamelCase ) def __call__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase = 512 , UpperCamelCase = 512 , UpperCamelCase = 100 , UpperCamelCase = 4.0 , UpperCamelCase = 1 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = "pil" , UpperCamelCase = True , ) -> int: __lowerCAmelCase = self._execution_device __lowerCAmelCase = guidance_scale > 1.0 if isinstance(UpperCamelCase , UpperCamelCase ): __lowerCAmelCase = torch.cat(UpperCamelCase , dim=0 ) __lowerCAmelCase = image_embeds.shape[0] * num_images_per_prompt if isinstance(UpperCamelCase , UpperCamelCase ): __lowerCAmelCase = torch.cat(UpperCamelCase , dim=0 ) if do_classifier_free_guidance: __lowerCAmelCase = image_embeds.repeat_interleave(UpperCamelCase , dim=0 ) __lowerCAmelCase = negative_image_embeds.repeat_interleave(UpperCamelCase , dim=0 ) __lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase ) self.scheduler.set_timesteps(UpperCamelCase , device=UpperCamelCase ) __lowerCAmelCase = self.scheduler.timesteps __lowerCAmelCase = self.unet.config.in_channels __lowerCAmelCase , __lowerCAmelCase = downscale_height_and_width(UpperCamelCase , UpperCamelCase , self.movq_scale_factor ) # create initial latent __lowerCAmelCase = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase , UpperCamelCase , UpperCamelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance __lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __lowerCAmelCase = {"image_embeds": image_embeds} __lowerCAmelCase = self.unet( sample=UpperCamelCase , timestep=UpperCamelCase , encoder_hidden_states=UpperCamelCase , added_cond_kwargs=UpperCamelCase , return_dict=UpperCamelCase , )[0] if do_classifier_free_guidance: __lowerCAmelCase , __lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) __lowerCAmelCase , __lowerCAmelCase = noise_pred.chunk(2 ) __lowerCAmelCase , __lowerCAmelCase = variance_pred.chunk(2 ) __lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) __lowerCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): __lowerCAmelCase , __lowerCAmelCase = 
noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 __lowerCAmelCase = self.scheduler.step( UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase , )[0] # post-processing __lowerCAmelCase = self.movq.decode(UpperCamelCase , force_not_quantize=UpperCamelCase )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: __lowerCAmelCase = image * 0.5 + 0.5 __lowerCAmelCase = image.clamp(0 , 1 ) __lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __lowerCAmelCase = self.numpy_to_pil(UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase )
714
'''simple docstring''' from __future__ import annotations def __lowerCAmelCase ( lowerCamelCase : list ): '''simple docstring''' if not lowerCamelCase: raise ValueError("List is empty" ) return sum(lowerCamelCase ) / len(lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
39
0
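The downscale helper at the top of the pipeline above rounds the requested resolution up to the latent grid; a clean-named sketch of that rounding:

def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    # ceil(pixels / scale_factor**2) latent cells, re-expressed in units of
    # scale_factor, matching the helper defined at the top of the pipeline
    new_height = height // scale_factor**2 + (height % scale_factor**2 != 0)
    new_width = width // scale_factor**2 + (width % scale_factor**2 != 0)
    return new_height * scale_factor, new_width * scale_factor

print(downscale_height_and_width(768, 768))  # (96, 96)
print(downscale_height_and_width(700, 700))  # (88, 88): rounded up to the grid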
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) lowerCAmelCase : Optional[Any] = { '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''], '''processing_trocr''': ['''TrOCRProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[str] = [ '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TrOCRForCausalLM''', '''TrOCRPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
715
'''simple docstring''' import re def __lowerCAmelCase ( lowerCamelCase : str ): '''simple docstring''' __lowerCAmelCase = re.compile( r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" ) return bool(re.search(lowerCamelCase , lowerCamelCase ) ) if __name__ == "__main__": lowerCAmelCase : Optional[Any] = '''0094702343221''' print(is_sri_lankan_phone_number(phone))
39
0
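A self-contained check of the Sri Lankan phone-number pattern above, with readable stand-in names and the compiled regex actually reused:

import re

PHONE_RE = re.compile(r"^(?:0|94|\+94|0{2}94)7(0|1|2|4|5|6|7|8)(-| |)\d{7}$")

def is_sri_lankan_phone_number(phone: str) -> bool:
    return bool(PHONE_RE.search(phone))

for number in ("0094702343221", "+94771234567", "0721234567", "0731234567"):
    print(number, is_sri_lankan_phone_number(number))
# The last one is False: 3 is not an accepted carrier digit after the 7.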
'''simple docstring''' def __lowerCAmelCase ( lowerCamelCase : str ): '''simple docstring''' __lowerCAmelCase = [0 for i in range(len(lowerCamelCase ) )] # initialize interval's left pointer and right pointer __lowerCAmelCase , __lowerCAmelCase = 0, 0 for i in range(1 , len(lowerCamelCase ) ): # case when current index is inside the interval if i <= right_pointer: __lowerCAmelCase = min(right_pointer - i + 1 , z_result[i - left_pointer] ) __lowerCAmelCase = min_edge while go_next(lowerCamelCase , lowerCamelCase , lowerCamelCase ): z_result[i] += 1 # if new index's result gives us more right interval, # we've to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: __lowerCAmelCase , __lowerCAmelCase = i, i + z_result[i] - 1 return z_result def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : list[int] , lowerCamelCase : str ): '''simple docstring''' return i + z_result[i] < len(lowerCamelCase ) and s[z_result[i]] == s[i + z_result[i]] def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : str ): '''simple docstring''' __lowerCAmelCase = 0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string __lowerCAmelCase = z_function(pattern + input_str ) for val in z_result: # if value is greater then length of the pattern string # that means this index is starting position of substring # which is equal to pattern string if val >= len(lowerCamelCase ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
716
'''simple docstring''' import os import sys import unittest lowerCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowerCAmelCase : Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''') lowerCAmelCase : Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''') class UpperCAmelCase__ ( unittest.TestCase ): def UpperCAmelCase_ ( self ) -> str: __lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase ) __lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase ) __lowerCAmelCase = {"BertModelTest": "BertModelTester"} __lowerCAmelCase = { "BlipModelTest": "BlipModelTester", "BlipTextImageModelTest": "BlipTextImageModelsModelTester", "BlipTextModelTest": "BlipTextModelTester", "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester", "BlipVQAModelTest": "BlipVQAModelTester", "BlipVisionModelTest": "BlipVisionModelTester", } self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase ) __lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase ) __lowerCAmelCase = { "BertForMaskedLM": ["BertModelTest"], "BertForMultipleChoice": ["BertModelTest"], "BertForNextSentencePrediction": ["BertModelTest"], "BertForPreTraining": ["BertModelTest"], "BertForQuestionAnswering": ["BertModelTest"], "BertForSequenceClassification": ["BertModelTest"], "BertForTokenClassification": ["BertModelTest"], "BertLMHeadModel": ["BertModelTest"], "BertModel": ["BertModelTest"], } __lowerCAmelCase = { "BlipForConditionalGeneration": ["BlipTextImageModelTest"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"], "BlipForQuestionAnswering": ["BlipVQAModelTest"], "BlipModel": ["BlipModelTest"], "BlipTextModel": ["BlipTextModelTest"], "BlipVisionModel": ["BlipVisionModelTest"], } self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) def UpperCAmelCase_ ( self ) -> str: __lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase ) __lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase ) __lowerCAmelCase = { "BertForMaskedLM": ["BertModelTester"], "BertForMultipleChoice": ["BertModelTester"], "BertForNextSentencePrediction": ["BertModelTester"], "BertForPreTraining": ["BertModelTester"], "BertForQuestionAnswering": ["BertModelTester"], "BertForSequenceClassification": ["BertModelTester"], "BertForTokenClassification": ["BertModelTester"], "BertLMHeadModel": ["BertModelTester"], "BertModel": ["BertModelTester"], } __lowerCAmelCase = { "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"], "BlipForQuestionAnswering": ["BlipVQAModelTester"], "BlipModel": ["BlipModelTester"], "BlipTextModel": ["BlipTextModelTester"], "BlipVisionModel": ["BlipVisionModelTester"], } self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
39
0
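A quick usage sketch of the Z-function above; note the convention z[0] = 0, and that pattern occurrences are counted by running it over pattern + text, as in the search routine above:

def z_function(s: str) -> list[int]:
    z = [0] * len(s)
    left = right = 0
    for i in range(1, len(s)):
        if i <= right:                      # inside a known Z-box: reuse prior work
            z[i] = min(right - i + 1, z[i - left])
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
        if i + z[i] - 1 > right:            # extend the rightmost Z-box
            left, right = i, i + z[i] - 1
    return z

print(z_function("aaaa"))  # [0, 3, 2, 1]

pattern, text = "abr", "abracadabra"
z = z_function(pattern + text)
print(sum(v >= len(pattern) for v in z))  # 2 occurrences of "abr"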
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase : Any = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase : Tuple = { '''vocab_file''': { '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''', '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''', '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''', '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''', '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''', }, '''merges_file''': { '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''', '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''', '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''', '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''', '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''', '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''', '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''', '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''', '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''', }, } lowerCAmelCase : str = { '''gpt2''': 1_0_2_4, '''gpt2-medium''': 1_0_2_4, '''gpt2-large''': 1_0_2_4, '''gpt2-xl''': 1_0_2_4, '''distilgpt2''': 1_0_2_4, } class UpperCAmelCase__ ( UpperCamelCase__ ): a : Dict = VOCAB_FILES_NAMES a : Any = PRETRAINED_VOCAB_FILES_MAP a : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a : Optional[Any] = ["""input_ids""", """attention_mask"""] a : Union[str, Any] = GPTaTokenizer def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="<|endoftext|>" , UpperCamelCase="<|endoftext|>" , UpperCamelCase="<|endoftext|>" , UpperCamelCase=False , **UpperCamelCase , ) -> int: super().__init__( UpperCamelCase , UpperCamelCase , tokenizer_file=UpperCamelCase , unk_token=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , add_prefix_space=UpperCamelCase , **UpperCamelCase , ) __lowerCAmelCase = kwargs.pop("add_bos_token" , UpperCamelCase ) __lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , UpperCamelCase ) != add_prefix_space: __lowerCAmelCase = getattr(UpperCamelCase , pre_tok_state.pop("type" ) ) __lowerCAmelCase = add_prefix_space __lowerCAmelCase = pre_tok_class(**UpperCamelCase ) __lowerCAmelCase = add_prefix_space def UpperCAmelCase_ ( self , *UpperCamelCase , **UpperCamelCase ) -> BatchEncoding: __lowerCAmelCase = kwargs.get("is_split_into_words" , UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized 
inputs." ) return super()._batch_encode_plus(*UpperCamelCase , **UpperCamelCase ) def UpperCAmelCase_ ( self , *UpperCamelCase , **UpperCamelCase ) -> BatchEncoding: __lowerCAmelCase = kwargs.get("is_split_into_words" , UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*UpperCamelCase , **UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> Tuple[str]: __lowerCAmelCase = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase ) return tuple(UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[int]: __lowerCAmelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) + [self.eos_token_id] ) if len(UpperCamelCase ) > self.model_max_length: __lowerCAmelCase = input_ids[-self.model_max_length :] return input_ids
717
'''simple docstring''' from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class UpperCAmelCase__ ( UpperCamelCase__ ): a : torch.FloatTensor class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): @register_to_config def __init__( self , UpperCamelCase = 16 , UpperCamelCase = 88 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = 1 , UpperCamelCase = 0.0 , UpperCamelCase = 32 , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = "geglu" , UpperCamelCase = True , UpperCamelCase = True , ) -> List[str]: super().__init__() __lowerCAmelCase = num_attention_heads __lowerCAmelCase = attention_head_dim __lowerCAmelCase = num_attention_heads * attention_head_dim __lowerCAmelCase = in_channels __lowerCAmelCase = torch.nn.GroupNorm(num_groups=UpperCamelCase , num_channels=UpperCamelCase , eps=1E-6 , affine=UpperCamelCase ) __lowerCAmelCase = nn.Linear(UpperCamelCase , UpperCamelCase ) # 3. Define transformers blocks __lowerCAmelCase = nn.ModuleList( [ BasicTransformerBlock( UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , cross_attention_dim=UpperCamelCase , activation_fn=UpperCamelCase , attention_bias=UpperCamelCase , double_self_attention=UpperCamelCase , norm_elementwise_affine=UpperCamelCase , ) for d in range(UpperCamelCase ) ] ) __lowerCAmelCase = nn.Linear(UpperCamelCase , UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=1 , UpperCamelCase=None , UpperCamelCase = True , ) -> List[str]: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = hidden_states.shape __lowerCAmelCase = batch_frames // num_frames __lowerCAmelCase = hidden_states __lowerCAmelCase = hidden_states[None, :].reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) __lowerCAmelCase = self.norm(UpperCamelCase ) __lowerCAmelCase = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = self.proj_in(UpperCamelCase ) # 2. Blocks for block in self.transformer_blocks: __lowerCAmelCase = block( UpperCamelCase , encoder_hidden_states=UpperCamelCase , timestep=UpperCamelCase , cross_attention_kwargs=UpperCamelCase , class_labels=UpperCamelCase , ) # 3. Output __lowerCAmelCase = self.proj_out(UpperCamelCase ) __lowerCAmelCase = ( hidden_states[None, None, :] .reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) __lowerCAmelCase = hidden_states.reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=UpperCamelCase )
39
0
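The fast-tokenizer __init__ above deserializes the backend pre-tokenizer state, flips add_prefix_space, and rebuilds the pre-tokenizer class. A shorter sketch of the same effect using the tokenizers library, assuming a GPT-2 style PreTrainedTokenizerFast instance named tok with a ByteLevel pre-tokenizer (both the name and the ByteLevel assumption are illustrative):

from tokenizers import pre_tokenizers

def set_prefix_space(tok, add_prefix_space: bool):
    # Swap in a ByteLevel pre-tokenizer with the requested flag; this is
    # what the serialize/rebuild round-trip above accomplishes more generically.
    tok.backend_tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)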
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __lowerCAmelCase ( lowerCamelCase : Any ): '''simple docstring''' __lowerCAmelCase = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2] __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: __lowerCAmelCase = [3, 3, 3, 3] __lowerCAmelCase = [5, 5, 5, 5] elif "fl4" in model_name: __lowerCAmelCase = [4, 4, 4, 4] __lowerCAmelCase = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: __lowerCAmelCase = [3, 3, 3, 3] if "lrf" in model_name: __lowerCAmelCase = [3, 3, 3, 3] else: __lowerCAmelCase = [2, 2, 2, 2] if "tiny" in model_name: __lowerCAmelCase = 96 elif "small" in model_name: __lowerCAmelCase = 96 elif "base" in model_name: __lowerCAmelCase = 1_28 elif "large" in model_name: __lowerCAmelCase = 1_92 elif "xlarge" in model_name: __lowerCAmelCase = 2_56 elif "huge" in model_name: __lowerCAmelCase = 3_52 # set label information __lowerCAmelCase = "huggingface/label-files" if "large" in model_name or "huge" in model_name: __lowerCAmelCase = "imagenet-22k-id2label.json" else: __lowerCAmelCase = "imagenet-1k-id2label.json" __lowerCAmelCase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="dataset" ) , "r" ) ) __lowerCAmelCase = {int(lowerCamelCase ): v for k, v in idalabel.items()} __lowerCAmelCase = {v: k for k, v in idalabel.items()} __lowerCAmelCase = FocalNetConfig( embed_dim=lowerCamelCase , depths=lowerCamelCase , focal_levels=lowerCamelCase , focal_windows=lowerCamelCase , use_conv_embed=lowerCamelCase , idalabel=lowerCamelCase , labelaid=lowerCamelCase , use_post_layernorm=lowerCamelCase , use_layerscale=lowerCamelCase , ) return config def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ): '''simple docstring''' if "patch_embed.proj" in name: __lowerCAmelCase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: __lowerCAmelCase = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: __lowerCAmelCase = "encoder." 
+ name if "encoder.layers" in name: __lowerCAmelCase = name.replace("encoder.layers" , "encoder.stages" ) if "downsample.proj" in name: __lowerCAmelCase = name.replace("downsample.proj" , "downsample.projection" ) if "blocks" in name: __lowerCAmelCase = name.replace("blocks" , "layers" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: __lowerCAmelCase = name.replace("modulation.f" , "modulation.projection_in" ) if "modulation.h.weight" in name or "modulation.h.bias" in name: __lowerCAmelCase = name.replace("modulation.h" , "modulation.projection_context" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: __lowerCAmelCase = name.replace("modulation.proj" , "modulation.projection_out" ) if name == "norm.weight": __lowerCAmelCase = "layernorm.weight" if name == "norm.bias": __lowerCAmelCase = "layernorm.bias" if "head" in name: __lowerCAmelCase = name.replace("head" , "classifier" ) else: __lowerCAmelCase = "focalnet." + name return name def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Union[str, Any]=False ): '''simple docstring''' __lowerCAmelCase = { "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth", "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth", "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth", "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth", "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth", "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth", "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth", "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth", "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth", "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth", } # fmt: on __lowerCAmelCase = model_name_to_url[model_name] print("Checkpoint URL: " , lowerCamelCase ) __lowerCAmelCase = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location="cpu" )["model"] # rename keys for key in state_dict.copy().keys(): __lowerCAmelCase = state_dict.pop(lowerCamelCase ) __lowerCAmelCase = val __lowerCAmelCase = get_focalnet_config(lowerCamelCase ) __lowerCAmelCase = FocalNetForImageClassification(lowerCamelCase ) model.eval() # load state dict model.load_state_dict(lowerCamelCase ) # verify conversion __lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowerCAmelCase = BitImageProcessor( do_resize=lowerCamelCase , size={"shortest_edge": 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase , crop_size=2_24 , do_normalize=lowerCamelCase , image_mean=lowerCamelCase , image_std=lowerCamelCase , ) __lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ) __lowerCAmelCase = processor(images=lowerCamelCase , return_tensors="pt" ) __lowerCAmelCase = transforms.Compose( [ transforms.Resize(2_56 ), transforms.CenterCrop(2_24 ), 
transforms.ToTensor(), transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) __lowerCAmelCase = image_transforms(lowerCamelCase ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , lowerCamelCase , atol=1e-4 ) __lowerCAmelCase = model(**lowerCamelCase ) __lowerCAmelCase = outputs.logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) print("First values of logits:" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": __lowerCAmelCase = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ) elif model_name == "focalnet-tiny-lrf": __lowerCAmelCase = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] ) elif model_name == "focalnet-small": __lowerCAmelCase = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] ) elif model_name == "focalnet-small-lrf": __lowerCAmelCase = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] ) elif model_name == "focalnet-base": __lowerCAmelCase = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] ) elif model_name == "focalnet-base-lrf": __lowerCAmelCase = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] ) assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase ) processor.save_pretrained(lowerCamelCase ) if push_to_hub: print(f'''Pushing model and processor of {model_name} to the hub...''' ) model.push_to_hub(f'''{model_name}''' ) processor.push_to_hub(f'''{model_name}''' ) if __name__ == "__main__": lowerCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''focalnet-tiny''', type=str, help='''Name of the FocalNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub.''', ) lowerCAmelCase : Optional[int] = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
718
'''simple docstring''' import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def __lowerCAmelCase ( lowerCamelCase : bytes , lowerCamelCase : int ): '''simple docstring''' __lowerCAmelCase = f'''{sampling_rate}''' __lowerCAmelCase = "1" __lowerCAmelCase = "f32le" __lowerCAmelCase = [ "ffmpeg", "-i", "pipe:0", "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-hide_banner", "-loglevel", "quiet", "pipe:1", ] try: with subprocess.Popen(lowerCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: __lowerCAmelCase = ffmpeg_process.communicate(lowerCamelCase ) except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error __lowerCAmelCase = output_stream[0] __lowerCAmelCase = np.frombuffer(lowerCamelCase , np.floataa ) if audio.shape[0] == 0: raise ValueError("Malformed soundfile" ) return audio def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : str = "f32le" , ): '''simple docstring''' __lowerCAmelCase = f'''{sampling_rate}''' __lowerCAmelCase = "1" if format_for_conversion == "s16le": __lowerCAmelCase = 2 elif format_for_conversion == "f32le": __lowerCAmelCase = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) __lowerCAmelCase = platform.system() if system == "Linux": __lowerCAmelCase = "alsa" __lowerCAmelCase = "default" elif system == "Darwin": __lowerCAmelCase = "avfoundation" __lowerCAmelCase = ":0" elif system == "Windows": __lowerCAmelCase = "dshow" __lowerCAmelCase = "default" __lowerCAmelCase = [ "ffmpeg", "-f", format_, "-i", input_, "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-fflags", "nobuffer", "-hide_banner", "-loglevel", "quiet", "pipe:1", ] __lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample __lowerCAmelCase = _ffmpeg_stream(lowerCamelCase , lowerCamelCase ) for item in iterator: yield item def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Union[Tuple[float, float], float]] = None , lowerCamelCase : str = "f32le" , ): '''simple docstring''' if stream_chunk_s is not None: __lowerCAmelCase = stream_chunk_s else: __lowerCAmelCase = chunk_length_s __lowerCAmelCase = ffmpeg_microphone(lowerCamelCase , lowerCamelCase , format_for_conversion=lowerCamelCase ) if format_for_conversion == "s16le": __lowerCAmelCase = np.intaa __lowerCAmelCase = 2 elif format_for_conversion == "f32le": __lowerCAmelCase = np.floataa __lowerCAmelCase = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) if stride_length_s is None: __lowerCAmelCase = chunk_length_s / 6 __lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowerCamelCase , (int, float) ): __lowerCAmelCase = [stride_length_s, stride_length_s] __lowerCAmelCase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample __lowerCAmelCase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample __lowerCAmelCase = datetime.datetime.now() __lowerCAmelCase = datetime.timedelta(seconds=lowerCamelCase ) for item in chunk_bytes_iter(lowerCamelCase , lowerCamelCase , stride=(stride_left, stride_right) , stream=lowerCamelCase ): # Put everything back in numpy scale __lowerCAmelCase = np.frombuffer(item["raw"] , dtype=lowerCamelCase ) __lowerCAmelCase = ( item["stride"][0] // size_of_sample, item["stride"][1] // size_of_sample, ) __lowerCAmelCase = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def __lowerCAmelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Tuple[int, int] , lowerCamelCase : bool = False ): '''simple docstring''' __lowerCAmelCase = B"" __lowerCAmelCase , __lowerCAmelCase = stride if stride_left + stride_right >= chunk_len: raise ValueError( f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) __lowerCAmelCase = 0 for raw in iterator: acc += raw if stream and len(lowerCamelCase ) < chunk_len: __lowerCAmelCase = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowerCamelCase ) >= chunk_len: # We are flushing the accumulator __lowerCAmelCase = (_stride_left, stride_right) __lowerCAmelCase = {"raw": acc[:chunk_len], "stride": stride} if stream: __lowerCAmelCase = False yield item __lowerCAmelCase = stride_left __lowerCAmelCase = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowerCamelCase ) > stride_left: __lowerCAmelCase = {"raw": acc, "stride": (_stride_left, 0)} if stream: __lowerCAmelCase = False yield item def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int ): '''simple docstring''' __lowerCAmelCase = 2**24 # 16Mo try: with subprocess.Popen(lowerCamelCase , stdout=subprocess.PIPE , bufsize=lowerCamelCase ) as ffmpeg_process: while True: __lowerCAmelCase = ffmpeg_process.stdout.read(lowerCamelCase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
39
0
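The FocalNet conversion above follows the usual checkpoint-conversion recipe: pop every key from the state dict, rewrite it through a chain of replace rules, and reinsert it. The same pass in isolation, where rename_fn stands in for the replace chain:

def rename_state_dict(state_dict, rename_fn):
    # Rewrite every key; the values (the tensors) are carried over untouched.
    return {rename_fn(key): value for key, value in state_dict.items()}

renamed = rename_state_dict(
    {"patch_embed.proj.weight": "tensor"},
    lambda k: k.replace("patch_embed.proj", "embeddings.patch_embeddings.projection"),
)
assert "embeddings.patch_embeddings.projection.weight" in renamed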
'''simple docstring'''
from typing import List

from .keymap import KEYMAP, get_character


def __lowerCAmelCase ( lowerCamelCase : str ):
    '''simple docstring'''
    def decorator(lowerCamelCase : List[Any] ):
        __lowerCAmelCase = getattr(lowerCamelCase , "handle_key" , [] )
        handle += [key]
        setattr(lowerCamelCase , "handle_key" , lowerCamelCase )
        return func

    return decorator


def __lowerCAmelCase ( *lowerCamelCase : List[str] ):
    '''simple docstring'''
    def decorator(lowerCamelCase : List[Any] ):
        __lowerCAmelCase = getattr(lowerCamelCase , "handle_key" , [] )
        handle += keys
        setattr(lowerCamelCase , "handle_key" , lowerCamelCase )
        return func

    return decorator


class UpperCAmelCase__ ( UpperCamelCase__ ):
    def __new__( cls , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]:
        __lowerCAmelCase = super().__new__(cls , UpperCamelCase , UpperCamelCase , UpperCamelCase )
        if not hasattr(UpperCamelCase , "key_handler" ):
            setattr(UpperCamelCase , "key_handler" , {} )
        setattr(UpperCamelCase , "handle_input" , KeyHandler.handle_input )
        for value in attrs.values():
            __lowerCAmelCase = getattr(UpperCamelCase , "handle_key" , [] )
            for key in handled_keys:
                __lowerCAmelCase = value
        return new_cls

    @staticmethod
    def UpperCAmelCase_ ( cls ) -> Dict:
        __lowerCAmelCase = get_character()
        if char != KEYMAP["undefined"]:
            __lowerCAmelCase = ord(UpperCamelCase )
        __lowerCAmelCase = cls.key_handler.get(UpperCamelCase )
        if handler:
            __lowerCAmelCase = char
            return handler(cls )
        else:
            return None


def __lowerCAmelCase ( cls : List[str] ):
    '''simple docstring'''
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
719
'''simple docstring'''
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def __lowerCAmelCase ( lowerCamelCase : List[str] ):
    '''simple docstring'''
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )


class UpperCAmelCase__ ( UpperCamelCase__ ):
    @staticmethod
    def UpperCAmelCase_ ( UpperCamelCase ) -> Tuple:
        __lowerCAmelCase = parser.add_parser("download" )
        download_parser.add_argument(
            "--cache-dir" , type=UpperCamelCase , default=UpperCamelCase , help="Path to location to store the models" )
        download_parser.add_argument(
            "--force" , action="store_true" , help="Force the model to be downloaded even if already in cache-dir" )
        download_parser.add_argument(
            "--trust-remote-code" ,
            action="store_true" ,
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" ,
        )
        download_parser.add_argument("model" , type=UpperCamelCase , help="Name of the model to download" )
        download_parser.set_defaults(func=UpperCamelCase )

    def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]:
        __lowerCAmelCase = model
        __lowerCAmelCase = cache
        __lowerCAmelCase = force
        __lowerCAmelCase = trust_remote_code

    def UpperCAmelCase_ ( self ) -> Any:
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
39
0
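The two decorators in the keymap module above both tag a function with a handle_key list that the metaclass later harvests into key_handler. The pattern stands alone as follows (the decorator name here is illustrative; the originals' names are obfuscated):

def on_key(key):
    def decorator(func):
        handled = getattr(func, "handle_key", [])
        handled += [key]
        setattr(func, "handle_key", handled)
        return func
    return decorator

@on_key("q")
def quit_menu(cls):
    return "quit"

assert quit_menu.handle_key == ["q"]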
'''simple docstring'''
import re


def is_sri_lankan_phone_number(phone: str):
    '''simple docstring'''
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"
        r"7(0|1|2|4|5|6|7|8)"
        r"(-| |)"
        r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
720
'''simple docstring'''
def count_divisors(n):
    '''simple docstring'''
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    '''simple docstring'''
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
39
0
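To sanity-check the phone-number pattern above: the alternation accepts the prefixes 0, 94, +94, or 0094, then a mobile code of 70-72 or 74-78, an optional separator, and seven digits. Exercised directly:

import re

pattern = re.compile(r"^(?:0|94|\+94|0{2}94)7(0|1|2|4|5|6|7|8)(-| |)\d{7}$")
assert pattern.search("0094702343221") is not None  # valid: 0094 + 70 + 7 digits
assert pattern.search("0094702343") is None         # too short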
'''simple docstring''' import collections import os import re from pathlib import Path lowerCAmelCase : int = '''src/transformers''' # Matches is_xxx_available() lowerCAmelCase : int = re.compile(r'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} lowerCAmelCase : int = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowerCAmelCase : Optional[int] = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available lowerCAmelCase : Dict = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") lowerCAmelCase : Any = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowerCAmelCase : str = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", lowerCAmelCase : Tuple = re.compile(r'''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], lowerCAmelCase : Any = re.compile(r'''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo lowerCAmelCase : str = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: lowerCAmelCase : Optional[int] = re.compile(r'''^\s*try:''') # Catches a line with else: lowerCAmelCase : Dict = re.compile(r'''^\s*else:''') def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ): '''simple docstring''' if _re_test_backend.search(lowerCamelCase ) is None: return None __lowerCAmelCase = [b[0] for b in _re_backend.findall(lowerCamelCase )] backends.sort() return "_and_".join(lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : Any ): '''simple docstring''' with open(lowerCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f: __lowerCAmelCase = f.readlines() __lowerCAmelCase = 0 while line_index < len(lowerCamelCase ) and not lines[line_index].startswith("_import_structure = {" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lowerCamelCase ): return None # First grab the objects without a specific backend in _import_structure __lowerCAmelCase = [] while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None: __lowerCAmelCase = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(lowerCamelCase ): __lowerCAmelCase = _re_one_line_import_struct.search(lowerCamelCase ).groups()[0] __lowerCAmelCase = re.findall(r"\[([^\]]+)\]" , lowerCamelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(", " )] ) line_index += 1 continue __lowerCAmelCase = _re_import_struct_key_value.search(lowerCamelCase ) if single_line_import_search is not None: __lowerCAmelCase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(lowerCamelCase ) > 0] objects.extend(lowerCamelCase ) elif line.startswith(" " * 8 + "\"" ): objects.append(line[9:-3] ) line_index += 1 __lowerCAmelCase = {"none": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("if TYPE_CHECKING" ): # If the line is an if not is_backend_available, we grab all objects associated. 
__lowerCAmelCase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __lowerCAmelCase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __lowerCAmelCase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ): __lowerCAmelCase = lines[line_index] if _re_import_struct_add_one.search(lowerCamelCase ) is not None: objects.append(_re_import_struct_add_one.search(lowerCamelCase ).groups()[0] ) elif _re_import_struct_add_many.search(lowerCamelCase ) is not None: __lowerCAmelCase = _re_import_struct_add_many.search(lowerCamelCase ).groups()[0].split(", " ) __lowerCAmelCase = [obj[1:-1] for obj in imports if len(lowerCamelCase ) > 0] objects.extend(lowerCamelCase ) elif _re_between_brackets.search(lowerCamelCase ) is not None: __lowerCAmelCase = _re_between_brackets.search(lowerCamelCase ).groups()[0].split(", " ) __lowerCAmelCase = [obj[1:-1] for obj in imports if len(lowerCamelCase ) > 0] objects.extend(lowerCamelCase ) elif _re_quote_object.search(lowerCamelCase ) is not None: objects.append(_re_quote_object.search(lowerCamelCase ).groups()[0] ) elif line.startswith(" " * 8 + "\"" ): objects.append(line[9:-3] ) elif line.startswith(" " * 12 + "\"" ): objects.append(line[13:-3] ) line_index += 1 __lowerCAmelCase = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __lowerCAmelCase = [] while ( line_index < len(lowerCamelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("else" ) ): __lowerCAmelCase = lines[line_index] __lowerCAmelCase = _re_import.search(lowerCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 8 ): objects.append(line[8:-2] ) line_index += 1 __lowerCAmelCase = {"none": objects} # Let's continue with backend-specific objects while line_index < len(lowerCamelCase ): # If the line is an if is_backend_available, we grab all objects associated. 
__lowerCAmelCase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __lowerCAmelCase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __lowerCAmelCase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ): __lowerCAmelCase = lines[line_index] __lowerCAmelCase = _re_import.search(lowerCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 12 ): objects.append(line[12:-2] ) line_index += 1 __lowerCAmelCase = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Dict ): '''simple docstring''' def find_duplicates(lowerCamelCase : Tuple ): return [k for k, v in collections.Counter(lowerCamelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __lowerCAmelCase = [] for key in import_dict_objects.keys(): __lowerCAmelCase = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) __lowerCAmelCase = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __lowerCAmelCase = "base imports" if key == "none" else f'''{key} backend''' errors.append(f'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = [] for root, _, files in os.walk(lowerCamelCase ): if "__init__.py" in files: __lowerCAmelCase = os.path.join(lowerCamelCase , "__init__.py" ) __lowerCAmelCase = parse_init(lowerCamelCase ) if objects is not None: __lowerCAmelCase = analyze_results(*lowerCamelCase ) if len(lowerCamelCase ) > 0: __lowerCAmelCase = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("\n".join(lowerCamelCase ) ) if len(lowerCamelCase ) > 0: raise ValueError("\n\n".join(lowerCamelCase ) ) def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = [] for path, directories, files in os.walk(lowerCamelCase ): for folder in directories: # Ignore private modules if folder.startswith("_" ): directories.remove(lowerCamelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(lowerCamelCase ) / folder).glob("*.py" ) ) ) == 0: continue __lowerCAmelCase = str((Path(lowerCamelCase ) / folder).relative_to(lowerCamelCase ) ) __lowerCAmelCase = short_path.replace(os.path.sep , "." ) submodules.append(lowerCamelCase ) for fname in files: if fname == "__init__.py": continue __lowerCAmelCase = str((Path(lowerCamelCase ) / fname).relative_to(lowerCamelCase ) ) __lowerCAmelCase = short_path.replace(".py" , "" ).replace(os.path.sep , "." 
) if len(submodule.split("." ) ) == 1: submodules.append(lowerCamelCase ) return submodules lowerCAmelCase : List[str] = [ '''convert_pytorch_checkpoint_to_tf2''', '''modeling_flax_pytorch_utils''', '''models.esm.openfold_utils''', ] def __lowerCAmelCase ( ): '''simple docstring''' from transformers.utils import direct_transformers_import __lowerCAmelCase = direct_transformers_import(lowerCamelCase ) __lowerCAmelCase = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentially re-) add them. with open(os.path.join(lowerCamelCase , "__init__.py" ) , "r" ) as f: __lowerCAmelCase = f.read() import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]" , lowerCamelCase ) ) ) __lowerCAmelCase = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(lowerCamelCase ) > 0: __lowerCAmelCase = "\n".join(f'''- {module}''' for module in module_not_registered ) raise ValueError( "The following submodules are not properly registered in the main init of Transformers:\n" f'''{list_of_modules}\n''' "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." ) if __name__ == "__main__": check_all_inits() check_submodules()
721
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)

lowerCAmelCase : Optional[int] = {
    '''facebook/dpr-ctx_encoder-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-question_encoder-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-reader-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-ctx_encoder-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-question_encoder-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-reader-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
    ),
}


class UpperCAmelCase__ ( UpperCamelCase__ ):
    a : Optional[Any] = """dpr"""

    def __init__(
        self ,
        UpperCamelCase=3_0522 ,
        UpperCamelCase=768 ,
        UpperCamelCase=12 ,
        UpperCamelCase=12 ,
        UpperCamelCase=3072 ,
        UpperCamelCase="gelu" ,
        UpperCamelCase=0.1 ,
        UpperCamelCase=0.1 ,
        UpperCamelCase=512 ,
        UpperCamelCase=2 ,
        UpperCamelCase=0.02 ,
        UpperCamelCase=1E-12 ,
        UpperCamelCase=0 ,
        UpperCamelCase="absolute" ,
        UpperCamelCase = 0 ,
        **UpperCamelCase ,
    ) -> Tuple:
        super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase )
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = type_vocab_size
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = layer_norm_eps
        __lowerCAmelCase = projection_dim
        __lowerCAmelCase = position_embedding_type
39
0
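The init checker earlier in this section leans on a small duplicate detector built from collections.Counter; in isolation it reduces to:

from collections import Counter

def find_duplicates(items):
    # Any value appearing more than once in the list.
    return [key for key, count in Counter(items).items() if count > 1]

assert find_duplicates(["BertModel", "BertModel", "BertConfig"]) == ["BertModel"]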
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = {'''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase : Optional[int] = { '''tokenizer_file''': { '''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''', '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''', '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''', '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''', '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''', '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''', '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''', }, } class UpperCAmelCase__ ( UpperCamelCase__ ): a : Dict = VOCAB_FILES_NAMES a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP a : List[Any] = ["""input_ids""", """attention_mask"""] a : Tuple = None def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="<unk>" , UpperCamelCase="<s>" , UpperCamelCase="</s>" , UpperCamelCase="<pad>" , UpperCamelCase=False , UpperCamelCase=False , **UpperCamelCase , ) -> Any: super().__init__( UpperCamelCase , UpperCamelCase , tokenizer_file=UpperCamelCase , unk_token=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , pad_token=UpperCamelCase , add_prefix_space=UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase , **UpperCamelCase , ) __lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , UpperCamelCase ) != add_prefix_space: __lowerCAmelCase = getattr(UpperCamelCase , pre_tok_state.pop("type" ) ) __lowerCAmelCase = add_prefix_space __lowerCAmelCase = pre_tok_class(**UpperCamelCase ) __lowerCAmelCase = add_prefix_space def UpperCAmelCase_ ( self , *UpperCamelCase , **UpperCamelCase ) -> BatchEncoding: __lowerCAmelCase = kwargs.get("is_split_into_words" , UpperCamelCase ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' " pretokenized inputs." ) return super()._batch_encode_plus(*UpperCamelCase , **UpperCamelCase ) def UpperCAmelCase_ ( self , *UpperCamelCase , **UpperCamelCase ) -> BatchEncoding: __lowerCAmelCase = kwargs.get("is_split_into_words" , UpperCamelCase ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' " pretokenized inputs." 
) return super()._encode_plus(*UpperCamelCase , **UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> Tuple[str]: __lowerCAmelCase = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase ) return tuple(UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[int]: __lowerCAmelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) + [self.eos_token_id] ) if len(UpperCamelCase ) > self.model_max_length: __lowerCAmelCase = input_ids[-self.model_max_length :] return input_ids
700
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase : Union[str, Any] = { '''configuration_distilbert''': [ '''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DistilBertConfig''', '''DistilBertOnnxConfig''', ], '''tokenization_distilbert''': ['''DistilBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple = ['''DistilBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple = [ '''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DistilBertForMaskedLM''', '''DistilBertForMultipleChoice''', '''DistilBertForQuestionAnswering''', '''DistilBertForSequenceClassification''', '''DistilBertForTokenClassification''', '''DistilBertModel''', '''DistilBertPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[str] = [ '''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDistilBertForMaskedLM''', '''TFDistilBertForMultipleChoice''', '''TFDistilBertForQuestionAnswering''', '''TFDistilBertForSequenceClassification''', '''TFDistilBertForTokenClassification''', '''TFDistilBertMainLayer''', '''TFDistilBertModel''', '''TFDistilBertPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[int] = [ '''FlaxDistilBertForMaskedLM''', '''FlaxDistilBertForMultipleChoice''', '''FlaxDistilBertForQuestionAnswering''', '''FlaxDistilBertForSequenceClassification''', '''FlaxDistilBertForTokenClassification''', '''FlaxDistilBertModel''', '''FlaxDistilBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, 
FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
39
0
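Both inits in this row hand an _import_structure dict to _LazyModule so that heavy backends are imported only on first attribute access. A much-simplified sketch of that mechanism (not transformers' actual implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, package, import_structure):
        super().__init__(name)
        self._package = package
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        # Called only when normal attribute lookup fails, i.e. on first access.
        if attr not in self._symbol_to_module:
            raise AttributeError(attr)
        submodule = importlib.import_module("." + self._symbol_to_module[attr], self._package)
        return getattr(submodule, attr)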
import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): @register_to_config def __init__( self , *, UpperCamelCase = 4 , UpperCamelCase = 768 , UpperCamelCase , UpperCamelCase , ) -> Optional[Any]: super().__init__() __lowerCAmelCase = nn.Parameter(torch.zeros(UpperCamelCase ) ) # parameters for additional clip time embeddings __lowerCAmelCase = nn.Linear(UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = nn.Linear(UpperCamelCase , UpperCamelCase ) # parameters for encoder hidden states __lowerCAmelCase = clip_extra_context_tokens __lowerCAmelCase = nn.Linear( UpperCamelCase , self.clip_extra_context_tokens * cross_attention_dim ) __lowerCAmelCase = nn.Linear(UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = nn.LayerNorm(UpperCamelCase ) def UpperCAmelCase_ ( self , *, UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]: if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings __lowerCAmelCase = image_embeddings.shape[0] __lowerCAmelCase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) __lowerCAmelCase = classifier_free_guidance_embeddings.expand( UpperCamelCase , -1 ) __lowerCAmelCase = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] __lowerCAmelCase = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... __lowerCAmelCase = self.embedding_proj(UpperCamelCase ) __lowerCAmelCase = self.clip_image_embeddings_project_to_time_embeddings(UpperCamelCase ) __lowerCAmelCase = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" __lowerCAmelCase = self.clip_extra_context_tokens_proj(UpperCamelCase ) __lowerCAmelCase = clip_extra_context_tokens.reshape(UpperCamelCase , -1 , self.clip_extra_context_tokens ) __lowerCAmelCase = clip_extra_context_tokens.permute(0 , 2 , 1 ) __lowerCAmelCase = self.encoder_hidden_states_proj(UpperCamelCase ) __lowerCAmelCase = self.text_encoder_hidden_states_norm(UpperCamelCase ) __lowerCAmelCase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
701
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] ): '''simple docstring''' assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __lowerCAmelCase ( lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Dict ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_3": "float64", "col_1": "string", "col_2": "int64"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Any ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_3": "float64", "col_1": "string", "col_2": "int64"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowerCAmelCase = {"col_2": "int64", "col_3": "float64", "col_1": "string"} __lowerCAmelCase = features.copy() __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = tmp_path / "cache" 
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , split=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] ): '''simple docstring''' if issubclass(lowerCamelCase , lowerCamelCase ): __lowerCAmelCase = jsonl_path elif issubclass(lowerCamelCase , lowerCamelCase ): __lowerCAmelCase = [jsonl_path] __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : str=("train",) ): '''simple docstring''' assert isinstance(lowerCamelCase , lowerCamelCase ) for split in splits: __lowerCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __lowerCAmelCase ( lowerCamelCase : Dict , lowerCamelCase : int , lowerCamelCase : List[str] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read() _check_json_datasetdict(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Dict ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_datasetdict(lowerCamelCase , lowerCamelCase ) 
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : int ): '''simple docstring''' if split: __lowerCAmelCase = {split: jsonl_path} else: __lowerCAmelCase = "train" __lowerCAmelCase = {"train": jsonl_path, "test": jsonl_path} __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_datasetdict(lowerCamelCase , lowerCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __lowerCAmelCase ( lowerCamelCase : Optional[Any] ): '''simple docstring''' return json.load(lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : Tuple ): '''simple docstring''' return [json.loads(lowerCamelCase ) for line in buffer] class UpperCAmelCase__ : @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase ).write() buffer.seek(0 ) __lowerCAmelCase = load_json_function(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) assert isinstance(exported_content[0] , UpperCamelCase ) assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase ).write() buffer.seek(0 ) __lowerCAmelCase = load_json(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , num_proc=2 ).write() buffer.seek(0 ) __lowerCAmelCase = load_json_function(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) assert isinstance(exported_content[0] , UpperCamelCase ) assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> 
Union[str, Any]: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase , num_proc=2 ).write() buffer.seek(0 ) __lowerCAmelCase = load_json(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase ) == 10 def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any: with pytest.raises(UpperCamelCase ): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , num_proc=0 ) @pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple: __lowerCAmelCase = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}''' __lowerCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(UpperCamelCase , UpperCamelCase , compression=UpperCamelCase ).write() with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f: __lowerCAmelCase = f.read() with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f: __lowerCAmelCase = f.read() assert exported_content == original_content
'''simple docstring''' import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowerCAmelCase : int = logging.get_logger(__name__) class UpperCAmelCase__ ( UpperCamelCase__ ): a : str = ["""input_values""", """attention_mask"""] def __init__( self , UpperCamelCase = 1 , UpperCamelCase = 1_6000 , UpperCamelCase = 0.0 , UpperCamelCase = False , UpperCamelCase = 80 , UpperCamelCase = 16 , UpperCamelCase = 64 , UpperCamelCase = "hann_window" , UpperCamelCase = 1.0 , UpperCamelCase = 80 , UpperCamelCase = 7600 , UpperCamelCase = 1E-10 , UpperCamelCase = 2 , UpperCamelCase = True , **UpperCamelCase , ) -> Union[str, Any]: super().__init__(feature_size=UpperCamelCase , sampling_rate=UpperCamelCase , padding_value=UpperCamelCase , **UpperCamelCase ) __lowerCAmelCase = do_normalize __lowerCAmelCase = return_attention_mask __lowerCAmelCase = num_mel_bins __lowerCAmelCase = hop_length __lowerCAmelCase = win_length __lowerCAmelCase = win_function __lowerCAmelCase = frame_signal_scale __lowerCAmelCase = fmin __lowerCAmelCase = fmax __lowerCAmelCase = mel_floor __lowerCAmelCase = reduction_factor __lowerCAmelCase = win_length * sampling_rate // 1000 __lowerCAmelCase = hop_length * sampling_rate // 1000 __lowerCAmelCase = optimal_fft_length(self.sample_size ) __lowerCAmelCase = (self.n_fft // 2) + 1 __lowerCAmelCase = window_function(window_length=self.sample_size , name=self.win_function , periodic=UpperCamelCase ) __lowerCAmelCase = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , ) if frame_signal_scale != 1.0: warnings.warn( "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , UpperCamelCase , ) if reduction_factor != 2.0: warnings.warn( "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , UpperCamelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase = 0.0 ) -> List[np.ndarray]: if attention_mask is not None: __lowerCAmelCase = np.array(UpperCamelCase , np.intaa ) __lowerCAmelCase = [] for vector, length in zip(UpperCamelCase , attention_mask.sum(-1 ) ): __lowerCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: __lowerCAmelCase = padding_value normed_input_values.append(UpperCamelCase ) else: __lowerCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def UpperCAmelCase_ ( self , UpperCamelCase , ) -> np.ndarray: __lowerCAmelCase = spectrogram( UpperCamelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , ) return log_mel_spec.T def __call__( self , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = None , 
UpperCamelCase = None , UpperCamelCase = None , **UpperCamelCase , ) -> BatchFeature: if audio is None and audio_target is None: raise ValueError("You must provide either `audio` or `audio_target` values." ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the ``sampling_rate`` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if audio is not None: __lowerCAmelCase = self._process_audio( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase , ) else: __lowerCAmelCase = None if audio_target is not None: __lowerCAmelCase = self._process_audio( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase , ) if inputs is None: return inputs_target else: __lowerCAmelCase = inputs_target["input_values"] __lowerCAmelCase = inputs_target.get("attention_mask" ) if decoder_attention_mask is not None: __lowerCAmelCase = decoder_attention_mask return inputs def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , **UpperCamelCase , ) -> BatchFeature: __lowerCAmelCase = isinstance(UpperCamelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) __lowerCAmelCase = is_batched_numpy or ( isinstance(UpperCamelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __lowerCAmelCase = [np.asarray(UpperCamelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(UpperCamelCase , np.ndarray ): __lowerCAmelCase = np.asarray(UpperCamelCase , dtype=np.floataa ) elif isinstance(UpperCamelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): __lowerCAmelCase = speech.astype(np.floataa ) # always return batch if not is_batched: __lowerCAmelCase = [speech] # needed to make pad() work on spectrogram inputs __lowerCAmelCase = self.feature_size # convert into correct format for padding if is_target: __lowerCAmelCase = [self._extract_mel_features(UpperCamelCase ) for waveform in speech] __lowerCAmelCase = BatchFeature({"input_values": features} ) __lowerCAmelCase = self.num_mel_bins else: __lowerCAmelCase = BatchFeature({"input_values": speech} ) __lowerCAmelCase = self.pad( UpperCamelCase , padding=UpperCamelCase , max_length=UpperCamelCase , truncation=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_attention_mask=UpperCamelCase , **UpperCamelCase , ) __lowerCAmelCase = feature_size_hack # convert input values to correct format __lowerCAmelCase = padded_inputs["input_values"] if not isinstance(input_values[0] , np.ndarray ): __lowerCAmelCase = [np.asarray(UpperCamelCase , dtype=np.floataa ) for array in input_values] elif ( not isinstance(UpperCamelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) 
): __lowerCAmelCase = [array.astype(np.floataa ) for array in input_values] elif isinstance(UpperCamelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): __lowerCAmelCase = input_values.astype(np.floataa ) # convert attention_mask to correct format __lowerCAmelCase = padded_inputs.get("attention_mask" ) if attention_mask is not None: __lowerCAmelCase = [np.asarray(UpperCamelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: __lowerCAmelCase = ( attention_mask if self._get_padding_strategies(UpperCamelCase , max_length=UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) __lowerCAmelCase = self.zero_mean_unit_var_norm( padded_inputs["input_values"] , attention_mask=UpperCamelCase , padding_value=self.padding_value ) if return_tensors is not None: __lowerCAmelCase = padded_inputs.convert_to_tensors(UpperCamelCase ) return padded_inputs def UpperCAmelCase_ ( self ) -> Dict[str, Any]: __lowerCAmelCase = super().to_dict() # Don't serialize these as they are derived from the other properties. __lowerCAmelCase = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"] for name in names: if name in output: del output[name] return output
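# --- Hedged sketch (editor's addition): the normalization used above in isolation ---
# A standalone NumPy version of the unpadded branch of `zero_mean_unit_var_norm`;
# the class method additionally respects attention masks and padding values.
import numpy as np

def zero_mean_unit_var(x: np.ndarray) -> np.ndarray:
    # Normalize to mean 0 / variance 1; the small epsilon guards against
    # division by zero on silent (constant) inputs.
    return (x - x.mean()) / np.sqrt(x.var() + 1e-7)

wave = np.random.randn(16000).astype(np.float32) * 3.0 + 1.0
normed = zero_mean_unit_var(wave)
assert abs(normed.mean()) < 1e-4 and abs(normed.var() - 1.0) < 1e-2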
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)

_import_structure = {
    '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
    '''processing_trocr''': ['''TrOCRProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_trocr'''] = [
        '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrOCRForCausalLM''',
        '''TrOCRPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
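# --- Hedged note on the lazy-import pattern above (editor's addition) ---
# `_LazyModule` defers the torch-backed submodule until an attribute is first
# accessed. Assumes an installed `transformers`; the module path mirrors the
# upstream layout.
import importlib

trocr = importlib.import_module("transformers.models.trocr")
print(type(trocr).__name__)          # "_LazyModule": nothing heavy imported yet
print(trocr.TrOCRConfig.model_type)  # first attribute access triggers the real import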
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class UpperCAmelCase__ : def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=2 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=None , ) -> List[str]: __lowerCAmelCase = parent __lowerCAmelCase = 13 __lowerCAmelCase = 7 __lowerCAmelCase = True __lowerCAmelCase = True __lowerCAmelCase = True __lowerCAmelCase = True __lowerCAmelCase = 99 __lowerCAmelCase = 384 __lowerCAmelCase = 2 __lowerCAmelCase = 4 __lowerCAmelCase = 37 __lowerCAmelCase = "gelu" __lowerCAmelCase = 0.1 __lowerCAmelCase = 0.1 __lowerCAmelCase = 512 __lowerCAmelCase = 16 __lowerCAmelCase = 2 __lowerCAmelCase = 0.02 __lowerCAmelCase = 3 __lowerCAmelCase = 4 __lowerCAmelCase = 128 __lowerCAmelCase = 2 __lowerCAmelCase = 9 __lowerCAmelCase = 1 __lowerCAmelCase = None def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase = None if self.use_input_mask: __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase = None if self.use_token_type_ids: __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase = None __lowerCAmelCase = None __lowerCAmelCase = None if self.use_labels: __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCamelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: __lowerCAmelCase = TFConvBertModel(config=UpperCamelCase ) __lowerCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __lowerCAmelCase = [input_ids, input_mask] __lowerCAmelCase = model(UpperCamelCase ) 
__lowerCAmelCase = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple: __lowerCAmelCase = TFConvBertForMaskedLM(config=UpperCamelCase ) __lowerCAmelCase = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __lowerCAmelCase = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: __lowerCAmelCase = self.num_labels __lowerCAmelCase = TFConvBertForSequenceClassification(config=UpperCamelCase ) __lowerCAmelCase = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __lowerCAmelCase = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: __lowerCAmelCase = self.num_choices __lowerCAmelCase = TFConvBertForMultipleChoice(config=UpperCamelCase ) __lowerCAmelCase = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __lowerCAmelCase = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __lowerCAmelCase = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __lowerCAmelCase = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } __lowerCAmelCase = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple: __lowerCAmelCase = self.num_labels __lowerCAmelCase = TFConvBertForTokenClassification(config=UpperCamelCase ) __lowerCAmelCase = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __lowerCAmelCase = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple: __lowerCAmelCase = TFConvBertForQuestionAnswering(config=UpperCamelCase ) __lowerCAmelCase = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __lowerCAmelCase = model(UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase_ ( self ) -> List[str]: __lowerCAmelCase = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = config_and_inputs __lowerCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , 
unittest.TestCase ): a : Dict = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) a : str = ( { """feature-extraction""": TFConvBertModel, """fill-mask""": TFConvBertForMaskedLM, """question-answering""": TFConvBertForQuestionAnswering, """text-classification""": TFConvBertForSequenceClassification, """token-classification""": TFConvBertForTokenClassification, """zero-shot""": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) a : List[str] = False a : Optional[int] = False a : Optional[int] = False def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = TFConvBertModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) def UpperCAmelCase_ ( self ) -> List[Any]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) def UpperCAmelCase_ ( self ) -> str: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) @slow def UpperCAmelCase_ ( self ) -> str: __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = True __lowerCAmelCase = True if hasattr(UpperCamelCase , "use_cache" ): __lowerCAmelCase = True __lowerCAmelCase = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) __lowerCAmelCase = getattr(self.model_tester , "key_length" , UpperCamelCase ) for model_class in self.all_model_classes: __lowerCAmelCase = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = model_class(UpperCamelCase ) __lowerCAmelCase = len(model(UpperCamelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase , saved_model=UpperCamelCase ) __lowerCAmelCase = os.path.join(UpperCamelCase , "saved_model" , "1" ) __lowerCAmelCase = tf.keras.models.load_model(UpperCamelCase ) __lowerCAmelCase = model(UpperCamelCase ) if self.is_encoder_decoder: __lowerCAmelCase = outputs["encoder_hidden_states"] __lowerCAmelCase = outputs["encoder_attentions"] else: __lowerCAmelCase = outputs["hidden_states"] __lowerCAmelCase = outputs["attentions"] self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) __lowerCAmelCase = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, 
self.model_tester.hidden_size] , ) self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def UpperCAmelCase_ ( self ) -> str: __lowerCAmelCase = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(UpperCamelCase ) def UpperCAmelCase_ ( self ) -> str: __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = True __lowerCAmelCase = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length ) __lowerCAmelCase = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) __lowerCAmelCase = getattr(self.model_tester , "key_length" , UpperCamelCase ) __lowerCAmelCase = getattr(self.model_tester , "key_length" , UpperCamelCase ) def check_decoder_attentions_output(UpperCamelCase ): __lowerCAmelCase = len(UpperCamelCase ) self.assertEqual(out_len % 2 , 0 ) __lowerCAmelCase = outputs.decoder_attentions self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(UpperCamelCase ): __lowerCAmelCase = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __lowerCAmelCase = True __lowerCAmelCase = False __lowerCAmelCase = model_class(UpperCamelCase ) __lowerCAmelCase = model(self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) __lowerCAmelCase = len(UpperCamelCase ) self.assertEqual(config.output_hidden_states , UpperCamelCase ) check_encoder_attentions_output(UpperCamelCase ) if self.is_encoder_decoder: __lowerCAmelCase = model_class(UpperCamelCase ) __lowerCAmelCase = model(self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) self.assertEqual(config.output_hidden_states , UpperCamelCase ) check_decoder_attentions_output(UpperCamelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __lowerCAmelCase = True __lowerCAmelCase = model_class(UpperCamelCase ) __lowerCAmelCase = model(self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) self.assertEqual(config.output_hidden_states , UpperCamelCase ) check_encoder_attentions_output(UpperCamelCase ) # Check attention is always last and order is fine __lowerCAmelCase = True __lowerCAmelCase = True __lowerCAmelCase = model_class(UpperCamelCase ) __lowerCAmelCase = model(self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase ) ) self.assertEqual(model.config.output_hidden_states , UpperCamelCase ) check_encoder_attentions_output(UpperCamelCase ) @require_tf class UpperCAmelCase__ ( unittest.TestCase ): @slow def UpperCAmelCase_ ( self ) -> int: __lowerCAmelCase = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) __lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) __lowerCAmelCase = model(UpperCamelCase )[0] __lowerCAmelCase = [1, 6, 768] self.assertEqual(output.shape , 
UpperCamelCase ) __lowerCAmelCase = tf.constant( [ [ [-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32], [0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24], [0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase , atol=1E-4 )
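# --- Hedged inference sketch (editor's addition, mirrors the integration test above) ---
# Assumes TensorFlow and network access to the "YituTech/conv-bert-base"
# checkpoint referenced by the test.
import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
hidden_states = model(input_ids)[0]
print(hidden_states.shape)  # (1, 6, 768): (batch, sequence_length, hidden_size)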
'''simple docstring''' import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class UpperCAmelCase__ ( UpperCamelCase__ ): a : List[str] = (CMStochasticIterativeScheduler,) a : str = 1_0 def UpperCAmelCase_ ( self , **UpperCamelCase ) -> str: __lowerCAmelCase = { "num_train_timesteps": 201, "sigma_min": 0.0_02, "sigma_max": 80.0, } config.update(**UpperCamelCase ) return config def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase = 10 __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = self.scheduler_classes[0](**UpperCamelCase ) scheduler.set_timesteps(UpperCamelCase ) __lowerCAmelCase = scheduler.timesteps[0] __lowerCAmelCase = scheduler.timesteps[1] __lowerCAmelCase = self.dummy_sample __lowerCAmelCase = 0.1 * sample __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCAmelCase_ ( self ) -> Any: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=UpperCamelCase ) def UpperCAmelCase_ ( self ) -> List[str]: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = 1 scheduler.set_timesteps(UpperCamelCase ) __lowerCAmelCase = scheduler.timesteps __lowerCAmelCase = torch.manual_seed(0 ) __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(UpperCamelCase ): # 1. scale model input __lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase ) # 2. predict noise residual __lowerCAmelCase = model(UpperCamelCase , UpperCamelCase ) # 3. predict previous sample x_t-1 __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample __lowerCAmelCase = pred_prev_sample __lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) ) __lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2 assert abs(result_mean.item() - 0.25_10 ) < 1E-3 def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = [106, 0] scheduler.set_timesteps(timesteps=UpperCamelCase ) __lowerCAmelCase = scheduler.timesteps __lowerCAmelCase = torch.manual_seed(0 ) __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input __lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase ) # 2. predict noise residual __lowerCAmelCase = model(UpperCamelCase , UpperCamelCase ) # 3. 
predict previous sample x_t-1 __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample __lowerCAmelCase = pred_prev_sample __lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) ) __lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2 assert abs(result_mean.item() - 0.45_27 ) < 1E-3 def UpperCAmelCase_ ( self ) -> Any: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = [39, 30, 12, 15, 0] with self.assertRaises(UpperCamelCase , msg="`timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = [39, 30, 12, 1, 0] __lowerCAmelCase = len(UpperCamelCase ) with self.assertRaises(UpperCamelCase , msg="Can only pass one of `num_inference_steps` or `timesteps`." ): scheduler.set_timesteps(num_inference_steps=UpperCamelCase , timesteps=UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Tuple: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**UpperCamelCase ) __lowerCAmelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCamelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ): scheduler.set_timesteps(timesteps=UpperCamelCase )
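# --- Hedged sampling-loop sketch (editor's addition) ---
# The denoising pattern the tests above verify: scale the input, predict the
# residual, step the scheduler. The lambda is a placeholder for a trained
# consistency model; the config values come from `get_scheduler_config` above.
import torch
from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(10)
generator = torch.manual_seed(0)
model = lambda x, t: torch.zeros_like(x)  # placeholder denoiser, not a real model
sample = torch.randn(1, 3, 32, 32, generator=generator) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    residual = model(model_input, t)
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
print(sample.shape)  # torch.Size([1, 3, 32, 32])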
'''simple docstring''' import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process lowerCAmelCase : List[str] = logging.getLogger(__name__) lowerCAmelCase : str = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) lowerCAmelCase : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class UpperCAmelCase__ : a : Optional[str] = field( default=UpperCamelCase__ , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCamelCase__ )} , ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a : bool = field( default=UpperCamelCase__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) a : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) a : bool = field( default=UpperCamelCase__ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def UpperCAmelCase_ ( self ) -> Tuple: if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class UpperCAmelCase__ : a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) a : Optional[str] = field(default=UpperCamelCase__ , metadata={"""help""": """The input training data file (a text file)."""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """An optional input validation ref data file for 
whole word masking in Chinese."""} , ) a : bool = field( default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) a : Optional[int] = field( default=5 , metadata={ """help""": """The percentage of the train set used as validation set in case there's no validation split""" } , ) a : Optional[int] = field( default=UpperCamelCase__ , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated. Default to the max input length of the model.""" ) } , ) a : Optional[int] = field( default=UpperCamelCase__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) a : float = field( default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} ) a : bool = field( default=UpperCamelCase__ , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) def UpperCAmelCase_ ( self ) -> Dict: if self.train_file is not None: __lowerCAmelCase = self.train_file.split("." )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: __lowerCAmelCase = self.validation_file.split("." )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def __lowerCAmelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : str ): '''simple docstring''' with open(lowerCamelCase , "r" , encoding="utf-8" ) as f: __lowerCAmelCase = [json.loads(lowerCamelCase ) for line in f.read().splitlines() if (len(lowerCamelCase ) > 0 and not line.isspace())] assert len(lowerCamelCase ) == len(lowerCamelCase ) __lowerCAmelCase = {c: dataset[c] for c in dataset.column_names} __lowerCAmelCase = refs return Dataset.from_dict(lowerCamelCase ) def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses() # Detecting last checkpoint. __lowerCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." 
) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" , lowerCamelCase ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. __lowerCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): __lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'''train[:{data_args.validation_split_percentage}%]''' , ) __lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'''train[{data_args.validation_split_percentage}%:]''' , ) else: __lowerCAmelCase = {} if data_args.train_file is not None: __lowerCAmelCase = data_args.train_file if data_args.validation_file is not None: __lowerCAmelCase = data_args.validation_file __lowerCAmelCase = data_args.train_file.split("." )[-1] if extension == "txt": __lowerCAmelCase = "text" __lowerCAmelCase = load_dataset(lowerCamelCase , data_files=lowerCamelCase ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __lowerCAmelCase = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: __lowerCAmelCase = AutoConfig.from_pretrained(model_args.config_name , **lowerCamelCase ) elif model_args.model_name_or_path: __lowerCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCamelCase ) else: __lowerCAmelCase = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." 
) if model_args.config_overrides is not None: logger.info(f'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(f'''New config: {config}''' ) __lowerCAmelCase = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: __lowerCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowerCamelCase ) elif model_args.model_name_or_path: __lowerCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowerCamelCase ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: __lowerCAmelCase = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) __lowerCAmelCase = AutoModelForMaskedLM.from_config(lowerCamelCase ) model.resize_token_embeddings(len(lowerCamelCase ) ) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: __lowerCAmelCase = datasets["train"].column_names else: __lowerCAmelCase = datasets["validation"].column_names __lowerCAmelCase = "text" if "text" in column_names else column_names[0] __lowerCAmelCase = "max_length" if data_args.pad_to_max_length else False def tokenize_function(lowerCamelCase : str ): # Remove empty lines __lowerCAmelCase = [line for line in examples["text"] if len(lowerCamelCase ) > 0 and not line.isspace()] return tokenizer(examples["text"] , padding=lowerCamelCase , truncation=lowerCamelCase , max_length=data_args.max_seq_length ) __lowerCAmelCase = datasets.map( lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: __lowerCAmelCase = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: __lowerCAmelCase = add_chinese_references( tokenized_datasets["validation"] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer __lowerCAmelCase = data_args.train_ref_file or data_args.validation_ref_file if has_ref: __lowerCAmelCase = False # Data collator # This one will take care of randomly masking the tokens. 
__lowerCAmelCase = DataCollatorForWholeWordMask(tokenizer=lowerCamelCase , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer __lowerCAmelCase = Trainer( model=lowerCamelCase , args=lowerCamelCase , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , ) # Training if training_args.do_train: if last_checkpoint is not None: __lowerCAmelCase = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): __lowerCAmelCase = model_args.model_name_or_path else: __lowerCAmelCase = None __lowerCAmelCase = trainer.train(resume_from_checkpoint=lowerCamelCase ) trainer.save_model() # Saves the tokenizer too for easy upload __lowerCAmelCase = os.path.join(training_args.output_dir , "train_results.txt" ) if trainer.is_world_process_zero(): with open(lowerCamelCase , "w" ) as writer: logger.info("***** Train results *****" ) for key, value in sorted(train_result.metrics.items() ): logger.info(f''' {key} = {value}''' ) writer.write(f'''{key} = {value}\n''' ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) ) # Evaluation __lowerCAmelCase = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) __lowerCAmelCase = trainer.evaluate() __lowerCAmelCase = math.exp(eval_output["eval_loss"] ) __lowerCAmelCase = perplexity __lowerCAmelCase = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt" ) if trainer.is_world_process_zero(): with open(lowerCamelCase , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in sorted(results.items() ): logger.info(f''' {key} = {value}''' ) writer.write(f'''{key} = {value}\n''' ) return results def __lowerCAmelCase ( lowerCamelCase : List[Any] ): '''simple docstring''' main() if __name__ == "__main__": main()
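# --- Hedged sketch (editor's addition): whole-word masking in isolation ---
# The collator the script above wires into `Trainer`; it masks every sub-token
# of a selected word together. Assumes `transformers`, `torch`, and network
# access to the "bert-base-uncased" tokenizer.
from transformers import AutoTokenizer, DataCollatorForWholeWordMask

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15)
examples = [{"input_ids": tokenizer("whole word masking groups subwords")["input_ids"]}]
batch = collator(examples)
print(batch["input_ids"].shape)         # token ids with [MASK] insertions
print((batch["labels"] != -100).sum())  # number of positions the model must predict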
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def world_covidaa_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covidaa_stats().items():
        print(f'{key}\n{value}\n')
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase : Any = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = { '''facebook/data2vec-vision-base-ft''': ( '''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json''' ), } class UpperCAmelCase__ ( UpperCamelCase__ ): a : Dict = """data2vec-vision""" def __init__( self , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=12 , UpperCamelCase=3072 , UpperCamelCase="gelu" , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.02 , UpperCamelCase=1E-12 , UpperCamelCase=224 , UpperCamelCase=16 , UpperCamelCase=3 , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=True , UpperCamelCase=[3, 5, 7, 11] , UpperCamelCase=[1, 2, 3, 6] , UpperCamelCase=True , UpperCamelCase=0.4 , UpperCamelCase=256 , UpperCamelCase=1 , UpperCamelCase=False , UpperCamelCase=255 , **UpperCamelCase , ) -> Any: super().__init__(**UpperCamelCase ) __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = image_size __lowerCAmelCase = patch_size __lowerCAmelCase = num_channels __lowerCAmelCase = use_mask_token __lowerCAmelCase = use_absolute_position_embeddings __lowerCAmelCase = use_relative_position_bias __lowerCAmelCase = use_shared_relative_position_bias __lowerCAmelCase = layer_scale_init_value __lowerCAmelCase = drop_path_rate __lowerCAmelCase = use_mean_pooling # decode head attributes (semantic segmentation) __lowerCAmelCase = out_indices __lowerCAmelCase = pool_scales # auxiliary head attributes (semantic segmentation) __lowerCAmelCase = use_auxiliary_head __lowerCAmelCase = auxiliary_loss_weight __lowerCAmelCase = auxiliary_channels __lowerCAmelCase = auxiliary_num_convs __lowerCAmelCase = auxiliary_concat_input __lowerCAmelCase = semantic_loss_ignore_index class UpperCAmelCase__ ( UpperCamelCase__ ): a : str = version.parse("""1.11""" ) @property def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def UpperCAmelCase_ ( self ) -> float: return 1E-4
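# --- Hedged usage sketch (editor's addition) ---
# Instantiating the vision config defined above; upstream transformers exposes
# it as `Data2VecVisionConfig` (the class names in this file are obfuscated).
from transformers import Data2VecVisionConfig

config = Data2VecVisionConfig(image_size=224, patch_size=16, num_channels=3)
print(config.model_type, config.hidden_size, config.num_hidden_layers)  # data2vec-vision 768 12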
'''simple docstring''' from __future__ import annotations import math def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ): '''simple docstring''' if len(lowerCamelCase ) != 2 or len(a[0] ) != 2 or len(lowerCamelCase ) != 2 or len(b[0] ) != 2: raise Exception("Matrices are not 2x2" ) __lowerCAmelCase = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ): '''simple docstring''' return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(lowerCamelCase ) ) ] def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ): '''simple docstring''' return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(lowerCamelCase ) ) ] def __lowerCAmelCase ( lowerCamelCase : list ): '''simple docstring''' if len(lowerCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception("Odd matrices are not supported!" ) __lowerCAmelCase = len(lowerCamelCase ) __lowerCAmelCase = matrix_length // 2 __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase , lowerCamelCase )] for i in range(lowerCamelCase )] __lowerCAmelCase = [ [a[i][j] for j in range(lowerCamelCase , lowerCamelCase )] for i in range(lowerCamelCase , lowerCamelCase ) ] __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase )] for i in range(lowerCamelCase )] __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase )] for i in range(lowerCamelCase , lowerCamelCase )] return top_left, top_right, bot_left, bot_right def __lowerCAmelCase ( lowerCamelCase : list ): '''simple docstring''' return len(lowerCamelCase ), len(matrix[0] ) def __lowerCAmelCase ( lowerCamelCase : list ): '''simple docstring''' print("\n".join(str(lowerCamelCase ) for line in matrix ) ) def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ): '''simple docstring''' if matrix_dimensions(lowerCamelCase ) == (2, 2): return default_matrix_multiplication(lowerCamelCase , lowerCamelCase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase ) __lowerCAmelCase = actual_strassen(lowerCamelCase , matrix_subtraction(lowerCamelCase , lowerCamelCase ) ) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) __lowerCAmelCase = actual_strassen(lowerCamelCase , matrix_subtraction(lowerCamelCase , lowerCamelCase ) ) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) ) __lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) ) __lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) ) __lowerCAmelCase = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) , lowerCamelCase ) __lowerCAmelCase = matrix_addition(lowerCamelCase , lowerCamelCase ) __lowerCAmelCase = matrix_addition(lowerCamelCase , lowerCamelCase ) __lowerCAmelCase = 
matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) , lowerCamelCase ) # construct the new matrix from our 4 quadrants __lowerCAmelCase = [] for i in range(len(lowerCamelCase ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(lowerCamelCase ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ): '''simple docstring''' if matrix_dimensions(lowerCamelCase )[1] != matrix_dimensions(lowerCamelCase )[0]: __lowerCAmelCase = ( "Unable to multiply these matrices, please check the dimensions.\n" f'''Matrix A: {matrixa}\n''' f'''Matrix B: {matrixa}''' ) raise Exception(lowerCamelCase ) __lowerCAmelCase = matrix_dimensions(lowerCamelCase ) __lowerCAmelCase = matrix_dimensions(lowerCamelCase ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] __lowerCAmelCase = max(*lowerCamelCase , *lowerCamelCase ) __lowerCAmelCase = int(math.pow(2 , math.ceil(math.loga(lowerCamelCase ) ) ) ) __lowerCAmelCase = matrixa __lowerCAmelCase = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , lowerCamelCase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , lowerCamelCase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , lowerCamelCase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) __lowerCAmelCase = actual_strassen(lowerCamelCase , lowerCamelCase ) # Removing the additional zeros for i in range(0 , lowerCamelCase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , lowerCamelCase ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": lowerCAmelCase : Tuple = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] lowerCAmelCase : Any = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]] print(strassen(matrixa, matrixa))
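# --- Hedged complexity note (editor's addition, not in the original file) ---
# Strassen computes the 2x2 block product with 7 recursive multiplications
# instead of 8, so T(n) = 7*T(n/2) + O(n^2), i.e. O(n^log2(7)).
import math

print(round(math.log2(7), 3))  # 2.807: Strassen's exponent, vs 3 for the naive algorithm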
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : Dict = logging.get_logger(__name__) lowerCAmelCase : Dict = { '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class UpperCAmelCase__ ( UpperCamelCase__ ): a : Optional[int] = """sew-d""" def __init__( self , UpperCamelCase=32 , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=12 , UpperCamelCase=3072 , UpperCamelCase=2 , UpperCamelCase=512 , UpperCamelCase=256 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=("p2c", "c2p") , UpperCamelCase="layer_norm" , UpperCamelCase="gelu_python" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=0.0 , UpperCamelCase=0.1 , UpperCamelCase=0.02 , UpperCamelCase=1E-7 , UpperCamelCase=1E-5 , UpperCamelCase="group" , UpperCamelCase="gelu" , UpperCamelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCamelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase=False , UpperCamelCase=128 , UpperCamelCase=16 , UpperCamelCase=True , UpperCamelCase=0.05 , UpperCamelCase=10 , UpperCamelCase=2 , UpperCamelCase=0.0 , UpperCamelCase=10 , UpperCamelCase=0 , UpperCamelCase="mean" , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=256 , UpperCamelCase=0 , UpperCamelCase=1 , UpperCamelCase=2 , **UpperCamelCase , ) -> Optional[int]: super().__init__(**UpperCamelCase , pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase ) __lowerCAmelCase = hidden_size __lowerCAmelCase = feat_extract_norm __lowerCAmelCase = feat_extract_activation __lowerCAmelCase = list(UpperCamelCase ) __lowerCAmelCase = list(UpperCamelCase ) __lowerCAmelCase = list(UpperCamelCase ) __lowerCAmelCase = conv_bias __lowerCAmelCase = num_conv_pos_embeddings __lowerCAmelCase = num_conv_pos_embedding_groups __lowerCAmelCase = len(self.conv_dim ) __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = intermediate_size __lowerCAmelCase = squeeze_factor __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = position_buckets __lowerCAmelCase = share_att_key __lowerCAmelCase = relative_attention __lowerCAmelCase = norm_rel_ebd __lowerCAmelCase = list(UpperCamelCase ) __lowerCAmelCase = hidden_act __lowerCAmelCase = num_attention_heads __lowerCAmelCase = hidden_dropout __lowerCAmelCase = attention_dropout __lowerCAmelCase = activation_dropout __lowerCAmelCase = feat_proj_dropout __lowerCAmelCase = final_dropout __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = feature_layer_norm_eps __lowerCAmelCase = initializer_range __lowerCAmelCase = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." 
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCAmelCase = apply_spec_augment __lowerCAmelCase = mask_time_prob __lowerCAmelCase = mask_time_length __lowerCAmelCase = mask_time_min_masks __lowerCAmelCase = mask_feature_prob __lowerCAmelCase = mask_feature_length __lowerCAmelCase = mask_feature_min_masks # ctc loss __lowerCAmelCase = ctc_loss_reduction __lowerCAmelCase = ctc_zero_infinity # sequence classification __lowerCAmelCase = use_weighted_layer_sum __lowerCAmelCase = classifier_proj_size @property def UpperCAmelCase_ ( self ) -> Optional[int]: return functools.reduce(operator.mul , self.conv_stride , 1 )
706
'''simple docstring''' import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput lowerCAmelCase : Optional[Any] = '''scheduler_config.json''' class UpperCAmelCase__ ( UpperCamelCase__ ): a : str = 1 a : Optional[int] = 2 a : int = 3 a : Union[str, Any] = 4 a : int = 5 a : Optional[int] = 6 a : str = 7 a : List[Any] = 8 a : List[str] = 9 a : List[str] = 1_0 a : int = 1_1 a : Any = 1_2 a : Any = 1_3 a : Tuple = 1_4 @dataclass class UpperCAmelCase__ ( UpperCamelCase__ ): a : torch.FloatTensor class UpperCAmelCase__ : a : Tuple = SCHEDULER_CONFIG_NAME a : Union[str, Any] = [] a : str = True @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase=False , **UpperCamelCase , ) -> int: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = cls.load_config( pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , ) return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False , **UpperCamelCase ) -> Dict: self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase ) @property def UpperCAmelCase_ ( self ) -> str: return self._get_compatibles() @classmethod def UpperCAmelCase_ ( cls ) -> Tuple: __lowerCAmelCase = list(set([cls.__name__] + cls._compatibles ) ) __lowerCAmelCase = importlib.import_module(__name__.split("." )[0] ) __lowerCAmelCase = [ getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase ) ] return compatible_classes
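# A minimal, self-contained sketch of the `_get_compatibles` pattern above:
# deduplicate a list of class names, then keep only the names that resolve to
# attributes of a module. The `collections` module below is just a stand-in so
# the example runs without diffusers installed.
import importlib

def resolve_compatibles(class_names, module_name):
    deduped = sorted(set(class_names))  # sorted only to make the output deterministic
    module = importlib.import_module(module_name)
    return [getattr(module, name) for name in deduped if hasattr(module, name)]

print(resolve_compatibles(["OrderedDict", "Counter", "NotARealClass"], "collections"))
# [<class 'collections.Counter'>, <class 'collections.OrderedDict'>]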
39
0
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase : Optional[int] = logging.get_logger(__name__) lowerCAmelCase : Union[str, Any] = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class UpperCAmelCase__ ( UpperCamelCase__ ): a : Tuple = """deta""" a : str = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self , UpperCamelCase=None , UpperCamelCase=900 , UpperCamelCase=2048 , UpperCamelCase=6 , UpperCamelCase=2048 , UpperCamelCase=8 , UpperCamelCase=6 , UpperCamelCase=1024 , UpperCamelCase=8 , UpperCamelCase=0.0 , UpperCamelCase=True , UpperCamelCase="relu" , UpperCamelCase=256 , UpperCamelCase=0.1 , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.02 , UpperCamelCase=1.0 , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase="sine" , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=4 , UpperCamelCase=True , UpperCamelCase=300 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=1 , UpperCamelCase=5 , UpperCamelCase=2 , UpperCamelCase=1 , UpperCamelCase=1 , UpperCamelCase=5 , UpperCamelCase=2 , UpperCamelCase=0.1 , UpperCamelCase=0.25 , **UpperCamelCase , ) -> List[Any]: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) __lowerCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] ) else: if isinstance(UpperCamelCase , UpperCamelCase ): __lowerCAmelCase = backbone_config.pop("model_type" ) __lowerCAmelCase = CONFIG_MAPPING[backbone_model_type] __lowerCAmelCase = config_class.from_dict(UpperCamelCase ) __lowerCAmelCase = backbone_config __lowerCAmelCase = num_queries __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = d_model __lowerCAmelCase = encoder_ffn_dim __lowerCAmelCase = encoder_layers __lowerCAmelCase = encoder_attention_heads __lowerCAmelCase = decoder_ffn_dim __lowerCAmelCase = decoder_layers __lowerCAmelCase = decoder_attention_heads __lowerCAmelCase = dropout __lowerCAmelCase = attention_dropout __lowerCAmelCase = activation_dropout __lowerCAmelCase = activation_function __lowerCAmelCase = init_std __lowerCAmelCase = init_xavier_std __lowerCAmelCase = encoder_layerdrop __lowerCAmelCase = auxiliary_loss __lowerCAmelCase = position_embedding_type # deformable attributes __lowerCAmelCase = num_feature_levels __lowerCAmelCase = encoder_n_points __lowerCAmelCase = decoder_n_points __lowerCAmelCase = two_stage __lowerCAmelCase = two_stage_num_proposals __lowerCAmelCase = with_box_refine __lowerCAmelCase = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True." 
) # Hungarian matcher __lowerCAmelCase = class_cost __lowerCAmelCase = bbox_cost __lowerCAmelCase = giou_cost # Loss coefficients __lowerCAmelCase = mask_loss_coefficient __lowerCAmelCase = dice_loss_coefficient __lowerCAmelCase = bbox_loss_coefficient __lowerCAmelCase = giou_loss_coefficient __lowerCAmelCase = eos_coefficient __lowerCAmelCase = focal_alpha super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase ) @property def UpperCAmelCase_ ( self ) -> int: return self.encoder_attention_heads @property def UpperCAmelCase_ ( self ) -> int: return self.d_model def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = copy.deepcopy(self.__dict__ ) __lowerCAmelCase = self.backbone_config.to_dict() __lowerCAmelCase = self.__class__.model_type return output
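# A minimal sketch of the nested backbone-config resolution used above: when
# the backbone config arrives as a plain dict, its "model_type" key selects the
# concrete config class. `ResNetConfig` and `CONFIG_MAPPING` below are
# stand-ins for the real transformers registry, so the example is self-contained.
class ResNetConfig:
    model_type = "resnet"

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    @classmethod
    def from_dict(cls, config_dict):
        return cls(**config_dict)

CONFIG_MAPPING = {"resnet": ResNetConfig}

def resolve_backbone_config(backbone_config):
    if isinstance(backbone_config, dict):
        backbone_config = dict(backbone_config)  # avoid mutating the caller's dict
        model_type = backbone_config.pop("model_type")
        config_class = CONFIG_MAPPING[model_type]
        backbone_config = config_class.from_dict(backbone_config)
    return backbone_config

cfg = resolve_backbone_config({"model_type": "resnet", "out_features": ["stage2", "stage3", "stage4"]})
print(type(cfg).__name__, cfg.out_features)  # ResNetConfig ['stage2', 'stage3', 'stage4']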
707
'''simple docstring''' import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger lowerCAmelCase : List[Any] = get_logger(__name__) class UpperCAmelCase__ : def __init__( self , UpperCamelCase = None ) -> Union[str, Any]: __lowerCAmelCase = ( os.path.join(UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) __lowerCAmelCase = Extractor def UpperCAmelCase_ ( self , UpperCamelCase ) -> str: from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" __lowerCAmelCase = os.path.abspath(UpperCamelCase ) return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase ) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> bool: return force_extract or ( not os.path.isfile(UpperCamelCase ) and not (os.path.isdir(UpperCamelCase ) and os.listdir(UpperCamelCase )) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False ) -> str: __lowerCAmelCase = self.extractor.infer_extractor_format(UpperCamelCase ) if not extractor_format: return input_path __lowerCAmelCase = self._get_output_path(UpperCamelCase ) if self._do_extract(UpperCamelCase , UpperCamelCase ): self.extractor.extract(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return output_path class UpperCAmelCase__ ( UpperCamelCase__ ): @classmethod @abstractmethod def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool: ... @staticmethod @abstractmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: ... 
class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): a : List[bytes] = [] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> List[Any]: with open(UpperCamelCase , "rb" ) as f: return f.read(UpperCamelCase ) @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool: if not magic_number: __lowerCAmelCase = max(len(UpperCamelCase ) for cls_magic_number in cls.magic_numbers ) try: __lowerCAmelCase = cls.read_magic_number(UpperCamelCase , UpperCamelCase ) except OSError: return False return any(magic_number.startswith(UpperCamelCase ) for cls_magic_number in cls.magic_numbers ) class UpperCAmelCase__ ( UpperCamelCase__ ): @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool: return tarfile.is_tarfile(UpperCamelCase ) @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict: def resolved(UpperCamelCase ) -> str: return os.path.realpath(os.path.abspath(UpperCamelCase ) ) def badpath(UpperCamelCase , UpperCamelCase ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(UpperCamelCase , UpperCamelCase ) ).startswith(UpperCamelCase ) def badlink(UpperCamelCase , UpperCamelCase ) -> bool: # Links are interpreted relative to the directory containing the link __lowerCAmelCase = resolved(os.path.join(UpperCamelCase , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=UpperCamelCase ) __lowerCAmelCase = resolved(UpperCamelCase ) for finfo in members: if badpath(finfo.name , UpperCamelCase ): logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' ) elif finfo.issym() and badlink(UpperCamelCase , UpperCamelCase ): logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' ) elif finfo.islnk() and badlink(UpperCamelCase , UpperCamelCase ): logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' ) else: yield finfo @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) __lowerCAmelCase = tarfile.open(UpperCamelCase ) tar_file.extractall(UpperCamelCase , members=TarExtractor.safemembers(UpperCamelCase , UpperCamelCase ) ) tar_file.close() class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x1F\x8B"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: with gzip.open(UpperCamelCase , "rb" ) as gzip_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : List[Any] = [ B"""PK\x03\x04""", B"""PK\x05\x06""", # empty archive B"""PK\x07\x08""", # spanned archive ] @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool: if super().is_extractable(UpperCamelCase , magic_number=UpperCamelCase ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. 
# From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(UpperCamelCase , "rb" ) as fp: __lowerCAmelCase = _EndRecData(UpperCamelCase ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: __lowerCAmelCase = fp.read(UpperCamelCase ) # CD is where we expect it to be if len(UpperCamelCase ) == sizeCentralDir: __lowerCAmelCase = struct.unpack(UpperCamelCase , UpperCamelCase ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) with zipfile.ZipFile(UpperCamelCase , "r" ) as zip_file: zip_file.extractall(UpperCamelCase ) zip_file.close() class UpperCAmelCase__ ( UpperCamelCase__ ): a : Tuple = [B"""\xFD\x37\x7A\x58\x5A\x00"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: with lzma.open(UpperCamelCase ) as compressed_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : str = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.RARFILE_AVAILABLE: raise ImportError("Please pip install rarfile" ) import rarfile os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) __lowerCAmelCase = rarfile.RarFile(UpperCamelCase ) rf.extractall(UpperCamelCase ) rf.close() class UpperCAmelCase__ ( UpperCamelCase__ ): a : int = [B"""\x28\xb5\x2F\xFD"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.ZSTANDARD_AVAILABLE: raise ImportError("Please pip install zstandard" ) import zstandard as zstd __lowerCAmelCase = zstd.ZstdDecompressor() with open(UpperCamelCase , "rb" ) as ifh, open(UpperCamelCase , "wb" ) as ofh: dctx.copy_stream(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x42\x5A\x68"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: with bza.open(UpperCamelCase , "rb" ) as compressed_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x37\x7A\xBC\xAF\x27\x1C"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.PY7ZR_AVAILABLE: raise ImportError("Please pip install py7zr" ) import pyazr os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) with pyazr.SevenZipFile(UpperCamelCase , "r" ) as archive: archive.extractall(UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x04\x22\x4D\x18"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.LZ4_AVAILABLE: raise ImportError("Please pip 
install lz4" ) import lza.frame with lza.frame.open(UpperCamelCase , "rb" ) as compressed_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ : # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) a : Dict[str, Type[BaseExtractor]] = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def UpperCAmelCase_ ( cls ) -> Optional[Any]: return max( len(UpperCamelCase ) for extractor in cls.extractors.values() if issubclass(UpperCamelCase , UpperCamelCase ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict: try: return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase , magic_number_length=UpperCamelCase ) except OSError: return b"" @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = False ) -> bool: warnings.warn( "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. " "Use 'infer_extractor_format' instead." , category=UpperCamelCase , ) __lowerCAmelCase = cls.infer_extractor_format(UpperCamelCase ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase ) -> str: # <Added version="2.4.0"/> __lowerCAmelCase = cls._get_magic_number_max_length() __lowerCAmelCase = cls._read_magic_number(UpperCamelCase , UpperCamelCase ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(UpperCamelCase , magic_number=UpperCamelCase ): return extractor_format @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = "deprecated" , ) -> None: os.makedirs(os.path.dirname(UpperCamelCase ) , exist_ok=UpperCamelCase ) # Prevent parallel extractions __lowerCAmelCase = str(Path(UpperCamelCase ).with_suffix(".lock" ) ) with FileLock(UpperCamelCase ): shutil.rmtree(UpperCamelCase , ignore_errors=UpperCamelCase ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(UpperCamelCase , UpperCamelCase ): # passed as positional arg warnings.warn( "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. " "Use 'extractor_format' instead." , category=UpperCamelCase , ) __lowerCAmelCase = extractor if extractor != "deprecated" else extractor_format else: __lowerCAmelCase = cls.extractors[extractor_format] return extractor.extract(UpperCamelCase , UpperCamelCase ) else: warnings.warn( "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an " "exception in 3.0.0." , category=UpperCamelCase , ) for extractor in cls.extractors.values(): if extractor.is_extractable(UpperCamelCase ): return extractor.extract(UpperCamelCase , UpperCamelCase )
39
0
'''simple docstring''' import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class UpperCAmelCase__ : @staticmethod def UpperCAmelCase_ ( *UpperCamelCase , **UpperCamelCase ) -> Union[str, Any]: pass def __lowerCAmelCase ( lowerCamelCase : Image ): '''simple docstring''' __lowerCAmelCase = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class UpperCAmelCase__ ( unittest.TestCase ): a : List[Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]: __lowerCAmelCase = DepthEstimationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> Optional[int]: __lowerCAmelCase = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" ) self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , UpperCamelCase ) import datasets __lowerCAmelCase = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) __lowerCAmelCase = depth_estimator( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] ) self.assertEqual( [ {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, ] , UpperCamelCase , ) @require_tf @unittest.skip("Depth estimation is not implemented in TF" ) def UpperCAmelCase_ ( self ) -> Dict: pass @slow @require_torch def UpperCAmelCase_ ( self ) -> Union[str, Any]: __lowerCAmelCase = "Intel/dpt-large" __lowerCAmelCase = pipeline("depth-estimation" , model=UpperCamelCase ) __lowerCAmelCase = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" ) __lowerCAmelCase = hashimage(outputs["depth"] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.3_04 ) self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.6_62 ) @require_torch def UpperCAmelCase_ ( self ) -> Dict: # This is highly irregular to have no small tests. self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
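# A minimal sketch of an image-fingerprinting helper like the one used in the
# test above, written against the standard-library call `hashlib.md5` (an
# assumption about the intended hash). Requires Pillow only for the demo image.
import hashlib

from PIL import Image

def hashimage(image) -> str:
    # Fingerprint a PIL image by hashing its raw pixel bytes.
    return hashlib.md5(image.tobytes()).hexdigest()

demo = Image.new("RGB", (2, 2), color=(255, 0, 0))
print(hashimage(demo))  # deterministic hex digest for this 2x2 red image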
708
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class UpperCAmelCase__ ( UpperCamelCase__ ): def __init__( self ) -> List[str]: # test for the above condition self.test() def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = 0 __lowerCAmelCase = False while not completed: if counter == 1: self.reset() __lowerCAmelCase = self.advance() if not self.does_advance(UpperCamelCase ): raise Exception( "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.update(UpperCamelCase ) counter += 1 if counter > 1_0000: raise Exception("update() does not fulfill the constraint." ) if self.remaining() != 0: raise Exception("Custom Constraint is not defined correctly." ) @abstractmethod def UpperCAmelCase_ ( self ) -> Dict: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self ) -> int: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self ) -> int: raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> str: raise NotImplementedError( F'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class UpperCAmelCase__ ( UpperCamelCase__ ): def __init__( self , UpperCamelCase ) -> Dict: super(UpperCamelCase , self ).__init__() if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(UpperCamelCase , UpperCamelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) __lowerCAmelCase = token_ids __lowerCAmelCase = len(self.token_ids ) __lowerCAmelCase = -1 # the index of the currently fulfilled step __lowerCAmelCase = False def UpperCAmelCase_ ( self ) -> Optional[int]: if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def UpperCAmelCase_ ( self , UpperCamelCase ) -> str: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def UpperCAmelCase_ ( self , UpperCamelCase ) -> int: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False if self.does_advance(UpperCamelCase ): self.fulfilled_idx += 1 __lowerCAmelCase = True if self.fulfilled_idx == (self.seqlen - 1): __lowerCAmelCase = True __lowerCAmelCase = completed else: # failed to make progress. __lowerCAmelCase = True self.reset() return stepped, completed, reset def UpperCAmelCase_ ( self ) -> Union[str, Any]: __lowerCAmelCase = False __lowerCAmelCase = 0 def UpperCAmelCase_ ( self ) -> Optional[int]: return self.seqlen - (self.fulfilled_idx + 1) def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Optional[Any]: __lowerCAmelCase = PhrasalConstraint(self.token_ids ) if stateful: __lowerCAmelCase = self.seqlen __lowerCAmelCase = self.fulfilled_idx __lowerCAmelCase = self.completed return new_constraint class UpperCAmelCase__ : def __init__( self , UpperCamelCase , UpperCamelCase=True ) -> Optional[int]: __lowerCAmelCase = max([len(UpperCamelCase ) for one in nested_token_ids] ) __lowerCAmelCase = {} for token_ids in nested_token_ids: __lowerCAmelCase = root for tidx, token_id in enumerate(UpperCamelCase ): if token_id not in level: __lowerCAmelCase = {} __lowerCAmelCase = level[token_id] if no_subsets and self.has_subsets(UpperCamelCase , UpperCamelCase ): raise ValueError( "Each list in `nested_token_ids` can't be a complete subset of another list, but is" F''' {nested_token_ids}.''' ) __lowerCAmelCase = root def UpperCAmelCase_ ( self , UpperCamelCase ) -> int: __lowerCAmelCase = self.trie for current_token in current_seq: __lowerCAmelCase = start[current_token] __lowerCAmelCase = list(start.keys() ) return next_tokens def UpperCAmelCase_ ( self , UpperCamelCase ) -> str: __lowerCAmelCase = self.next_tokens(UpperCamelCase ) return len(UpperCamelCase ) == 0 def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]: __lowerCAmelCase = list(root.values() ) if len(UpperCamelCase ) == 0: return 1 else: return sum([self.count_leaves(UpperCamelCase ) for nn in next_nodes] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> Optional[Any]: __lowerCAmelCase = self.count_leaves(UpperCamelCase ) return len(UpperCamelCase ) != leaf_count 
class UpperCAmelCase__ ( UpperCamelCase__ ): def __init__( self , UpperCamelCase ) -> List[Any]: super(UpperCamelCase , self ).__init__() if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(UpperCamelCase , UpperCamelCase ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(UpperCamelCase , UpperCamelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) __lowerCAmelCase = DisjunctiveTrie(UpperCamelCase ) __lowerCAmelCase = nested_token_ids __lowerCAmelCase = self.trie.max_height __lowerCAmelCase = [] __lowerCAmelCase = False def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase = self.trie.next_tokens(self.current_seq ) if len(UpperCamelCase ) == 0: return None else: return token_list def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[str]: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' ) __lowerCAmelCase = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def UpperCAmelCase_ ( self , UpperCamelCase ) -> int: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False if self.does_advance(UpperCamelCase ): self.current_seq.append(UpperCamelCase ) __lowerCAmelCase = True else: __lowerCAmelCase = True self.reset() __lowerCAmelCase = self.trie.reached_leaf(self.current_seq ) __lowerCAmelCase = completed return stepped, completed, reset def UpperCAmelCase_ ( self ) -> Dict: __lowerCAmelCase = False __lowerCAmelCase = [] def UpperCAmelCase_ ( self ) -> int: if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Union[str, Any]: __lowerCAmelCase = DisjunctiveConstraint(self.token_ids ) if stateful: __lowerCAmelCase = self.seqlen __lowerCAmelCase = self.current_seq __lowerCAmelCase = self.completed return new_constraint class UpperCAmelCase__ : def __init__( self , UpperCamelCase ) -> Union[str, Any]: __lowerCAmelCase = constraints # max # of steps required to fulfill a given constraint __lowerCAmelCase = max([c.seqlen for c in constraints] ) __lowerCAmelCase = len(UpperCamelCase ) __lowerCAmelCase = False self.init_state() def UpperCAmelCase_ ( self ) -> Union[str, Any]: __lowerCAmelCase = [] __lowerCAmelCase = None __lowerCAmelCase = [constraint.copy(stateful=UpperCamelCase ) for constraint in self.constraints] def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def UpperCAmelCase_ ( self ) -> List[str]: __lowerCAmelCase = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" __lowerCAmelCase = constraint.advance() if 
isinstance(UpperCamelCase , UpperCamelCase ): token_list.append(UpperCamelCase ) elif isinstance(UpperCamelCase , UpperCamelCase ): token_list.extend(UpperCamelCase ) else: __lowerCAmelCase = self.inprogress_constraint.advance() if isinstance(UpperCamelCase , UpperCamelCase ): token_list.append(UpperCamelCase ) elif isinstance(UpperCamelCase , UpperCamelCase ): token_list.extend(UpperCamelCase ) if len(UpperCamelCase ) == 0: return None else: return token_list def UpperCAmelCase_ ( self , UpperCamelCase ) -> int: self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint __lowerCAmelCase , __lowerCAmelCase = self.add(UpperCamelCase ) # the entire list of constraints are fulfilled if self.completed: break def UpperCAmelCase_ ( self , UpperCamelCase ) -> Dict: if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) __lowerCAmelCase , __lowerCAmelCase = False, False if self.completed: __lowerCAmelCase = True __lowerCAmelCase = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.inprogress_constraint.update(UpperCamelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCamelCase ) ) __lowerCAmelCase = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) __lowerCAmelCase = None if len(self.pending_constraints ) == 0: # we're done! __lowerCAmelCase = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(UpperCamelCase ): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = pending_constraint.update(UpperCamelCase ) if not stepped: raise Exception( "`constraint.update(token_id)` is not yielding incremental progress, " "even though `constraint.does_advance(token_id)` is true." ) if complete: self.complete_constraints.append(UpperCamelCase ) __lowerCAmelCase = None if not complete and stepped: __lowerCAmelCase = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". __lowerCAmelCase = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. __lowerCAmelCase = True break # prevent accidentally stepping through multiple constraints with just one token. 
return complete, stepped def UpperCAmelCase_ ( self , UpperCamelCase=True ) -> str: __lowerCAmelCase = ConstraintListState(self.constraints ) # we never actually touch the self.constraints objects # throughout this process, so they remain in their initialization state. if stateful: __lowerCAmelCase = [ constraint.copy(stateful=UpperCamelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: __lowerCAmelCase = self.inprogress_constraint.copy(stateful=UpperCamelCase ) __lowerCAmelCase = [constraint.copy() for constraint in self.pending_constraints] return new_state
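# How the constraint classes above are typically consumed: constrained beam
# search forces given phrases into generated text. A hedged sketch, assuming
# these classes correspond to `transformers.PhrasalConstraint` and that the
# public `generate(constraints=...)` API is available; running it downloads
# the small t5-small checkpoint.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

# Force the formal pronoun "Sie" to appear somewhere in the translation.
force_ids = tokenizer("Sie", add_special_tokens=False).input_ids
constraint = PhrasalConstraint(force_ids)

inputs = tokenizer("translate English to German: How old are you?", return_tensors="pt")
outputs = model.generate(
    **inputs,
    constraints=[constraint],  # constrained beam search requires num_beams > 1
    num_beams=5,
    max_new_tokens=30,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))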
39
0
'''simple docstring''' import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger lowerCAmelCase : List[str] = get_logger(__name__) lowerCAmelCase : Dict = Path(__file__).parent / '''model_card_template.md''' lowerCAmelCase : int = uuida().hex lowerCAmelCase : List[str] = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES lowerCAmelCase : str = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES lowerCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/''' def __lowerCAmelCase ( lowerCamelCase : Union[Dict, str, None] = None ): '''simple docstring''' __lowerCAmelCase = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}''' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'''; torch/{_torch_version}''' if is_flax_available(): ua += f'''; jax/{_jax_version}''' ua += f'''; flax/{_flax_version}''' if is_onnx_available(): ua += f'''; onnxruntime/{_onnxruntime_version}''' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(lowerCamelCase , lowerCamelCase ): ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() ) elif isinstance(lowerCamelCase , lowerCamelCase ): ua += "; " + user_agent return ua def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Optional[str] = None , lowerCamelCase : Optional[str] = None ): '''simple docstring''' if token is None: __lowerCAmelCase = HfFolder.get_token() if organization is None: __lowerCAmelCase = whoami(lowerCamelCase )["name"] return f'''{username}/{model_id}''' else: return f'''{organization}/{model_id}''' def __lowerCAmelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int ): '''simple docstring''' if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(lowerCamelCase , "local_rank" ) and args.local_rank not in [-1, 0]: return __lowerCAmelCase = args.hub_token if hasattr(lowerCamelCase , "hub_token" ) else None __lowerCAmelCase = get_full_repo_name(lowerCamelCase , token=lowerCamelCase ) __lowerCAmelCase = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=lowerCamelCase , model_name=lowerCamelCase , repo_name=lowerCamelCase , dataset_name=args.dataset_name if hasattr(lowerCamelCase , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(lowerCamelCase , "gradient_accumulation_steps" ) else None ) , adam_betaa=args.adam_betaa if hasattr(lowerCamelCase , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(lowerCamelCase , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(lowerCamelCase , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(lowerCamelCase , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(lowerCamelCase , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(lowerCamelCase , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(lowerCamelCase , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(lowerCamelCase , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(lowerCamelCase , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , ) __lowerCAmelCase = os.path.join(args.output_dir , "README.md" ) model_card.save(lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : Optional[str] , lowerCamelCase : Optional[str] = None ): '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash __lowerCAmelCase = str(Path(lowerCamelCase ).as_posix() ) __lowerCAmelCase = re.search(r"snapshots/([^/]+)/" , lowerCamelCase ) if search is None: return None __lowerCAmelCase = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(lowerCamelCase ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
lowerCAmelCase : Union[str, Any] = os.path.expanduser( os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface''')) ) lowerCAmelCase : List[str] = os.path.join(hf_cache_home, '''diffusers''') def __lowerCAmelCase ( lowerCamelCase : Optional[str] = None , lowerCamelCase : Optional[str] = None ): '''simple docstring''' if new_cache_dir is None: __lowerCAmelCase = DIFFUSERS_CACHE if old_cache_dir is None: __lowerCAmelCase = old_diffusers_cache __lowerCAmelCase = Path(lowerCamelCase ).expanduser() __lowerCAmelCase = Path(lowerCamelCase ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): __lowerCAmelCase = new_cache_dir / old_blob_path.relative_to(lowerCamelCase ) new_blob_path.parent.mkdir(parents=lowerCamelCase , exist_ok=lowerCamelCase ) os.replace(lowerCamelCase , lowerCamelCase ) try: os.symlink(lowerCamelCase , lowerCamelCase ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). lowerCAmelCase : str = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''') if not os.path.isfile(cache_version_file): lowerCAmelCase : Any = 0 else: with open(cache_version_file) as f: try: lowerCAmelCase : Union[str, Any] = int(f.read()) except ValueError: lowerCAmelCase : Optional[Any] = 0 if cache_version < 1: lowerCAmelCase : Tuple = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( '''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ''' '''existing cached models. This is a one-time operation, you can interrupt it or run it ''' '''later by calling `diffusers.utils.hub_utils.move_cache()`.''' ) try: move_cache() except Exception as e: lowerCAmelCase : Union[str, Any] = '''\n'''.join(traceback.format_tb(e.__traceback__)) logger.error( f'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease ' '''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ''' '''message and we will do our best to help.''' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, '''w''') as f: f.write('''1''') except Exception: logger.warning( f'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure ' '''the directory exists and can be written to.''' ) def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Optional[str] = None ): '''simple docstring''' if variant is not None: __lowerCAmelCase = weights_name.split("." 
) __lowerCAmelCase = splits[:-1] + [variant] + splits[-1:] __lowerCAmelCase = ".".join(lowerCamelCase ) return weights_name def __lowerCAmelCase ( lowerCamelCase : str , *, lowerCamelCase : Dict , lowerCamelCase : str , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Any , lowerCamelCase : str , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : List[Any] , lowerCamelCase : Tuple=None , ): '''simple docstring''' __lowerCAmelCase = str(lowerCamelCase ) if os.path.isfile(lowerCamelCase ): return pretrained_model_name_or_path elif os.path.isdir(lowerCamelCase ): if os.path.isfile(os.path.join(lowerCamelCase , lowerCamelCase ) ): # Load from a PyTorch checkpoint __lowerCAmelCase = os.path.join(lowerCamelCase , lowerCamelCase ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(lowerCamelCase , lowerCamelCase , lowerCamelCase ) ): __lowerCAmelCase = os.path.join(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return model_file else: raise EnvironmentError( f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(lowerCamelCase ).base_version ) >= version.parse("0.20.0" ) ): try: __lowerCAmelCase = hf_hub_download( lowerCamelCase , filename=_add_variant(lowerCamelCase , lowerCamelCase ) , cache_dir=lowerCamelCase , force_download=lowerCamelCase , proxies=lowerCamelCase , resume_download=lowerCamelCase , local_files_only=lowerCamelCase , use_auth_token=lowerCamelCase , user_agent=lowerCamelCase , subfolder=lowerCamelCase , revision=revision or commit_hash , ) warnings.warn( f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , lowerCamelCase , ) return model_file except: # noqa: E722 warnings.warn( f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(lowerCamelCase , lowerCamelCase )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(lowerCamelCase , lowerCamelCase )}\' so that the correct variant file can be added.''' , lowerCamelCase , ) try: # 2. 
Load model file as usual __lowerCAmelCase = hf_hub_download( lowerCamelCase , filename=lowerCamelCase , cache_dir=lowerCamelCase , force_download=lowerCamelCase , proxies=lowerCamelCase , resume_download=lowerCamelCase , local_files_only=lowerCamelCase , use_auth_token=lowerCamelCase , user_agent=lowerCamelCase , subfolder=lowerCamelCase , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ''' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ''' "this model name. Check the model page at " f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' ) except EntryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' ) except HTTPError as err: raise EnvironmentError( f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' ) except ValueError: raise EnvironmentError( f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it''' f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a''' f''' directory containing a file named {weights_name} or''' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ''' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ''' f'''containing a file named {weights_name}''' )
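# A self-contained sketch of the variant-splicing helper defined above: the
# variant tag is inserted just before the file extension of a weights filename.
from typing import Optional

def add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        weights_name = ".".join(splits[:-1] + [variant] + splits[-1:])
    return weights_name

print(add_variant("diffusion_pytorch_model.bin", "fp16"))  # diffusion_pytorch_model.fp16.bin
print(add_variant("model.safetensors"))                    # unchanged when variant is None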
709
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ): a : List[Any] = KandinskyImgaImgPipeline a : Union[str, Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""] a : List[Any] = [ """prompt""", """negative_prompt""", """image_embeds""", """negative_image_embeds""", """image""", ] a : Any = [ """generator""", """height""", """width""", """strength""", """guidance_scale""", """negative_prompt""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] a : Union[str, Any] = False @property def UpperCAmelCase_ ( self ) -> int: return 32 @property def UpperCAmelCase_ ( self ) -> List[str]: return 32 @property def UpperCAmelCase_ ( self ) -> Dict: return self.time_input_dim @property def UpperCAmelCase_ ( self ) -> int: return self.time_input_dim * 4 @property def UpperCAmelCase_ ( self ) -> int: return 100 @property def UpperCAmelCase_ ( self ) -> Optional[int]: __lowerCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def UpperCAmelCase_ ( self ) -> Union[str, Any]: torch.manual_seed(0 ) __lowerCAmelCase = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) __lowerCAmelCase = MultilingualCLIP(UpperCamelCase ) __lowerCAmelCase = text_encoder.eval() return text_encoder @property def UpperCAmelCase_ ( self ) -> List[str]: torch.manual_seed(0 ) __lowerCAmelCase = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } __lowerCAmelCase = UNetaDConditionModel(**UpperCamelCase ) return model @property def UpperCAmelCase_ ( self ) -> List[Any]: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def UpperCAmelCase_ ( self ) -> Dict: torch.manual_seed(0 ) 
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def UpperCAmelCase_ ( self ) -> Any: __lowerCAmelCase = self.dummy_text_encoder __lowerCAmelCase = self.dummy_tokenizer __lowerCAmelCase = self.dummy_unet __lowerCAmelCase = self.dummy_movq __lowerCAmelCase = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_00_85, "beta_end": 0.0_12, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } __lowerCAmelCase = DDIMScheduler(**UpperCamelCase ) __lowerCAmelCase = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=0 ) -> Optional[Any]: __lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) __lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase ) # create init_image __lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) __lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCAmelCase = Image.fromarray(np.uinta(UpperCamelCase ) ).convert("RGB" ).resize((256, 256) ) if str(UpperCamelCase ).startswith("mps" ): __lowerCAmelCase = torch.manual_seed(UpperCamelCase ) else: __lowerCAmelCase = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) __lowerCAmelCase = { "prompt": "horse", "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def UpperCAmelCase_ ( self ) -> Tuple: __lowerCAmelCase = "cpu" __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = self.pipeline_class(**UpperCamelCase ) __lowerCAmelCase = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) __lowerCAmelCase = pipe(**self.get_dummy_inputs(UpperCamelCase ) ) __lowerCAmelCase = output.images __lowerCAmelCase = pipe( **self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0] __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = np.array( [0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class UpperCAmelCase__ ( unittest.TestCase ): def UpperCAmelCase_ ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_img2img_frog.npy" ) __lowerCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) __lowerCAmelCase = "A red cartoon frog, 4k" __lowerCAmelCase = KandinskyPriorPipeline.from_pretrained( 
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(UpperCamelCase ) __lowerCAmelCase = KandinskyImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa ) __lowerCAmelCase = pipeline.to(UpperCamelCase ) pipeline.set_progress_bar_config(disable=UpperCamelCase ) __lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowerCAmelCase , __lowerCAmelCase = pipe_prior( UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() __lowerCAmelCase = pipeline( UpperCamelCase , image=UpperCamelCase , image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , ) __lowerCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
39
0
'''simple docstring'''
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
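The converters hook exercised in the last test is also reachable through the public API, since the CSV builder forwards pandas read_csv keyword arguments. A minimal sketch under that assumption; the file name scores.csv is hypothetical:

from datasets import load_dataset

# hypothetical CSV with an "int_list" column of space-separated integers
dataset = load_dataset(
    "csv",
    data_files="scores.csv",
    converters={"int_list": lambda x: [int(i) for i in x.split()]},
    split="train",
)
print(dataset[0]["int_list"])  # e.g. [1, 2, 3]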
710
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') lowerCAmelCase : Any = logging.getLogger(__name__) @dataclass class UpperCAmelCase__ : a : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a : bool = field( default=UpperCamelCase__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) a : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) a : bool = field( default=UpperCamelCase__ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) @dataclass class UpperCAmelCase__ : a : Optional[str] = field(default=UpperCamelCase__ , metadata={"""help""": """The input training data file (a text file)."""} ) a : Optional[str] = field( default=UpperCamelCase__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) a : bool = field( default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) a : Optional[int] = field( default=UpperCamelCase__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) a : Optional[int] = field( default=UpperCamelCase__ , metadata={ """help""": ( """The maximum total input sequence length after tokenization. If passed, sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a : bool = field( default=UpperCamelCase__ , metadata={ """help""": ( """Whether to pad all samples to the maximum sentence length. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch. 
More """ """efficient on GPU but very bad for TPU.""" ) } , ) a : Optional[int] = field( default=UpperCamelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) a : Optional[int] = field( default=UpperCamelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def UpperCAmelCase_ ( self ) -> Tuple: if self.train_file is not None: __lowerCAmelCase = self.train_file.split("." )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: __lowerCAmelCase = self.validation_file.split("." )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class UpperCAmelCase__ : a : PreTrainedTokenizerBase a : Union[bool, str, PaddingStrategy] = True a : Optional[int] = None a : Optional[int] = None def __call__( self , UpperCamelCase ) -> Optional[int]: __lowerCAmelCase = "label" if "label" in features[0].keys() else "labels" __lowerCAmelCase = [feature.pop(UpperCamelCase ) for feature in features] __lowerCAmelCase = len(UpperCamelCase ) __lowerCAmelCase = len(features[0]["input_ids"] ) __lowerCAmelCase = [ [{k: v[i] for k, v in feature.items()} for i in range(UpperCamelCase )] for feature in features ] __lowerCAmelCase = list(chain(*UpperCamelCase ) ) __lowerCAmelCase = self.tokenizer.pad( UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten __lowerCAmelCase = {k: v.view(UpperCamelCase , UpperCamelCase , -1 ) for k, v in batch.items()} # Add back labels __lowerCAmelCase = torch.tensor(UpperCamelCase , dtype=torch.intaa ) return batch def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag" , lowerCamelCase , lowerCamelCase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() __lowerCAmelCase = training_args.get_process_log_level() logger.setLevel(lowerCamelCase ) datasets.utils.logging.set_verbosity(lowerCamelCase ) transformers.utils.logging.set_verbosity(lowerCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __lowerCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: __lowerCAmelCase = {} if data_args.train_file is not None: __lowerCAmelCase = data_args.train_file if data_args.validation_file is not None: __lowerCAmelCase = data_args.validation_file __lowerCAmelCase = data_args.train_file.split("." )[-1] __lowerCAmelCase = load_dataset( lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. __lowerCAmelCase = load_dataset( "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCAmelCase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. __lowerCAmelCase = [f'''ending{i}''' for i in range(4 )] __lowerCAmelCase = "sent1" __lowerCAmelCase = "sent2" if data_args.max_seq_length is None: __lowerCAmelCase = tokenizer.model_max_length if max_seq_length > 10_24: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) __lowerCAmelCase = 10_24 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) __lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(lowerCamelCase : Tuple ): __lowerCAmelCase = [[context] * 4 for context in examples[context_name]] __lowerCAmelCase = examples[question_header_name] __lowerCAmelCase = [ [f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase ) ] # Flatten out __lowerCAmelCase = list(chain(*lowerCamelCase ) ) __lowerCAmelCase = list(chain(*lowerCamelCase ) ) # Tokenize __lowerCAmelCase = tokenizer( lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="max_length" if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) __lowerCAmelCase = raw_datasets["train"] if data_args.max_train_samples is not None: __lowerCAmelCase = min(len(lowerCamelCase ) , data_args.max_train_samples ) __lowerCAmelCase = train_dataset.select(range(lowerCamelCase ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): __lowerCAmelCase = train_dataset.map( lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) __lowerCAmelCase = raw_datasets["validation"] if data_args.max_eval_samples is not None: __lowerCAmelCase = min(len(lowerCamelCase ) , data_args.max_eval_samples ) __lowerCAmelCase = eval_dataset.select(range(lowerCamelCase ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): __lowerCAmelCase = eval_dataset.map( lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator __lowerCAmelCase = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(lowerCamelCase : Dict ): __lowerCAmelCase , __lowerCAmelCase = eval_predictions __lowerCAmelCase = np.argmax(lowerCamelCase , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer __lowerCAmelCase = Trainer( model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , ) # Training if training_args.do_train: __lowerCAmelCase = None if training_args.resume_from_checkpoint is not None: __lowerCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: __lowerCAmelCase = last_checkpoint __lowerCAmelCase = trainer.train(resume_from_checkpoint=lowerCamelCase ) trainer.save_model() # Saves the tokenizer too for easy upload __lowerCAmelCase = train_result.metrics __lowerCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase ) ) __lowerCAmelCase = min(lowerCamelCase , len(lowerCamelCase ) ) trainer.log_metrics("train" , lowerCamelCase ) trainer.save_metrics("train" , lowerCamelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) __lowerCAmelCase = 
trainer.evaluate() __lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase ) __lowerCAmelCase = min(lowerCamelCase , len(lowerCamelCase ) ) trainer.log_metrics("eval" , lowerCamelCase ) trainer.save_metrics("eval" , lowerCamelCase ) __lowerCAmelCase = { "finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice", "dataset_tags": "swag", "dataset_args": "regular", "dataset": "SWAG", "language": "en", } if training_args.push_to_hub: trainer.push_to_hub(**lowerCamelCase ) else: trainer.create_model_card(**lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : Tuple ): '''simple docstring''' main() if __name__ == "__main__": main()
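The collator above flattens the four candidate endings of every example into a single padding call, then views the padded tensors back to (batch, choices, seq). A minimal sketch of that reshape with plain tensors, independent of any tokenizer:

import torch

batch_size, num_choices, seq_len = 2, 4, 8
# stand-in for already-padded input_ids: 2 examples x 4 endings, flattened
flat = torch.arange(batch_size * num_choices * seq_len).reshape(batch_size * num_choices, seq_len)
unflat = flat.view(batch_size, num_choices, -1)  # (2, 4, 8), same storage
assert torch.equal(unflat[1, 3], flat[7])  # last ending of the second example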
39
0
'''simple docstring''' import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger lowerCAmelCase : List[Any] = get_logger(__name__) class UpperCAmelCase__ : def __init__( self , UpperCamelCase = None ) -> Union[str, Any]: __lowerCAmelCase = ( os.path.join(UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) __lowerCAmelCase = Extractor def UpperCAmelCase_ ( self , UpperCamelCase ) -> str: from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" __lowerCAmelCase = os.path.abspath(UpperCamelCase ) return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase ) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> bool: return force_extract or ( not os.path.isfile(UpperCamelCase ) and not (os.path.isdir(UpperCamelCase ) and os.listdir(UpperCamelCase )) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False ) -> str: __lowerCAmelCase = self.extractor.infer_extractor_format(UpperCamelCase ) if not extractor_format: return input_path __lowerCAmelCase = self._get_output_path(UpperCamelCase ) if self._do_extract(UpperCamelCase , UpperCamelCase ): self.extractor.extract(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return output_path class UpperCAmelCase__ ( UpperCamelCase__ ): @classmethod @abstractmethod def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool: ... @staticmethod @abstractmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: ... 
class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): a : List[bytes] = [] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> List[Any]: with open(UpperCamelCase , "rb" ) as f: return f.read(UpperCamelCase ) @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool: if not magic_number: __lowerCAmelCase = max(len(UpperCamelCase ) for cls_magic_number in cls.magic_numbers ) try: __lowerCAmelCase = cls.read_magic_number(UpperCamelCase , UpperCamelCase ) except OSError: return False return any(magic_number.startswith(UpperCamelCase ) for cls_magic_number in cls.magic_numbers ) class UpperCAmelCase__ ( UpperCamelCase__ ): @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool: return tarfile.is_tarfile(UpperCamelCase ) @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict: def resolved(UpperCamelCase ) -> str: return os.path.realpath(os.path.abspath(UpperCamelCase ) ) def badpath(UpperCamelCase , UpperCamelCase ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(UpperCamelCase , UpperCamelCase ) ).startswith(UpperCamelCase ) def badlink(UpperCamelCase , UpperCamelCase ) -> bool: # Links are interpreted relative to the directory containing the link __lowerCAmelCase = resolved(os.path.join(UpperCamelCase , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=UpperCamelCase ) __lowerCAmelCase = resolved(UpperCamelCase ) for finfo in members: if badpath(finfo.name , UpperCamelCase ): logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' ) elif finfo.issym() and badlink(UpperCamelCase , UpperCamelCase ): logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' ) elif finfo.islnk() and badlink(UpperCamelCase , UpperCamelCase ): logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' ) else: yield finfo @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) __lowerCAmelCase = tarfile.open(UpperCamelCase ) tar_file.extractall(UpperCamelCase , members=TarExtractor.safemembers(UpperCamelCase , UpperCamelCase ) ) tar_file.close() class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x1F\x8B"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: with gzip.open(UpperCamelCase , "rb" ) as gzip_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : List[Any] = [ B"""PK\x03\x04""", B"""PK\x05\x06""", # empty archive B"""PK\x07\x08""", # spanned archive ] @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool: if super().is_extractable(UpperCamelCase , magic_number=UpperCamelCase ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. 
# From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(UpperCamelCase , "rb" ) as fp: __lowerCAmelCase = _EndRecData(UpperCamelCase ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: __lowerCAmelCase = fp.read(UpperCamelCase ) # CD is where we expect it to be if len(UpperCamelCase ) == sizeCentralDir: __lowerCAmelCase = struct.unpack(UpperCamelCase , UpperCamelCase ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) with zipfile.ZipFile(UpperCamelCase , "r" ) as zip_file: zip_file.extractall(UpperCamelCase ) zip_file.close() class UpperCAmelCase__ ( UpperCamelCase__ ): a : Tuple = [B"""\xFD\x37\x7A\x58\x5A\x00"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: with lzma.open(UpperCamelCase ) as compressed_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : str = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.RARFILE_AVAILABLE: raise ImportError("Please pip install rarfile" ) import rarfile os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) __lowerCAmelCase = rarfile.RarFile(UpperCamelCase ) rf.extractall(UpperCamelCase ) rf.close() class UpperCAmelCase__ ( UpperCamelCase__ ): a : int = [B"""\x28\xb5\x2F\xFD"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.ZSTANDARD_AVAILABLE: raise ImportError("Please pip install zstandard" ) import zstandard as zstd __lowerCAmelCase = zstd.ZstdDecompressor() with open(UpperCamelCase , "rb" ) as ifh, open(UpperCamelCase , "wb" ) as ofh: dctx.copy_stream(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x42\x5A\x68"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: with bza.open(UpperCamelCase , "rb" ) as compressed_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x37\x7A\xBC\xAF\x27\x1C"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.PY7ZR_AVAILABLE: raise ImportError("Please pip install py7zr" ) import pyazr os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) with pyazr.SevenZipFile(UpperCamelCase , "r" ) as archive: archive.extractall(UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x04\x22\x4D\x18"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.LZ4_AVAILABLE: raise ImportError("Please pip 
install lz4" ) import lza.frame with lza.frame.open(UpperCamelCase , "rb" ) as compressed_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ : # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) a : Dict[str, Type[BaseExtractor]] = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def UpperCAmelCase_ ( cls ) -> Optional[Any]: return max( len(UpperCamelCase ) for extractor in cls.extractors.values() if issubclass(UpperCamelCase , UpperCamelCase ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict: try: return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase , magic_number_length=UpperCamelCase ) except OSError: return b"" @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = False ) -> bool: warnings.warn( "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. " "Use 'infer_extractor_format' instead." , category=UpperCamelCase , ) __lowerCAmelCase = cls.infer_extractor_format(UpperCamelCase ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase ) -> str: # <Added version="2.4.0"/> __lowerCAmelCase = cls._get_magic_number_max_length() __lowerCAmelCase = cls._read_magic_number(UpperCamelCase , UpperCamelCase ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(UpperCamelCase , magic_number=UpperCamelCase ): return extractor_format @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = "deprecated" , ) -> None: os.makedirs(os.path.dirname(UpperCamelCase ) , exist_ok=UpperCamelCase ) # Prevent parallel extractions __lowerCAmelCase = str(Path(UpperCamelCase ).with_suffix(".lock" ) ) with FileLock(UpperCamelCase ): shutil.rmtree(UpperCamelCase , ignore_errors=UpperCamelCase ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(UpperCamelCase , UpperCamelCase ): # passed as positional arg warnings.warn( "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. " "Use 'extractor_format' instead." , category=UpperCamelCase , ) __lowerCAmelCase = extractor if extractor != "deprecated" else extractor_format else: __lowerCAmelCase = cls.extractors[extractor_format] return extractor.extract(UpperCamelCase , UpperCamelCase ) else: warnings.warn( "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an " "exception in 3.0.0." , category=UpperCamelCase , ) for extractor in cls.extractors.values(): if extractor.is_extractable(UpperCamelCase ): return extractor.extract(UpperCamelCase , UpperCamelCase )
711
'''simple docstring''' # flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter lowerCAmelCase : List[str] = logging.get_logger(__name__) lowerCAmelCase : Dict[Optional[str], Type[Formatter]] = {} lowerCAmelCase : Dict[Optional[str], str] = {} lowerCAmelCase : Dict[Optional[str], Exception] = {} def __lowerCAmelCase ( lowerCamelCase : type , lowerCamelCase : Optional[str] , lowerCamelCase : Optional[List[str]] = None , ): '''simple docstring''' __lowerCAmelCase = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' ) __lowerCAmelCase = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' ) __lowerCAmelCase = format_type def __lowerCAmelCase ( lowerCamelCase : Exception , lowerCamelCase : Optional[str] , lowerCamelCase : Optional[List[str]] = None ): '''simple docstring''' __lowerCAmelCase = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): __lowerCAmelCase = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['''python''']) _register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow''']) _register_formatter(NumpyFormatter, '''numpy''', aliases=['''np''']) _register_formatter(PandasFormatter, '''pandas''', aliases=['''pd''']) _register_formatter(CustomFormatter, '''custom''') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch''']) else: lowerCAmelCase : Optional[int] = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''') _register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch''']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf''']) else: lowerCAmelCase : str = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''') _register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf''']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, '''jax''', aliases=[]) else: lowerCAmelCase : Any = ValueError('''JAX needs to be installed to be able to return JAX arrays.''') _register_unavailable_formatter(_jax_error, '''jax''', aliases=[]) def __lowerCAmelCase ( lowerCamelCase : Optional[str] ): '''simple docstring''' if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def __lowerCAmelCase ( lowerCamelCase : Optional[str] , **lowerCamelCase : Tuple ): '''simple docstring''' __lowerCAmelCase = get_format_type_from_alias(lowerCamelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**lowerCamelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f'''Return type should be None or selected 
in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
39
0
'''simple docstring'''
from __future__ import annotations


def ceil_index(v, l, r, key):  # noqa: E741
    # Binary search for the smallest index in (l, r] whose tail value is >= key
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh length-1 candidate
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling of v[i] among the current tails
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
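A quick check of the O(n log n) construction above, using the reconstructed names (the sample input is hypothetical):

example = [10, 9, 2, 5, 3, 7, 101, 18]
assert longest_increasing_subsequence_length(example) == 4  # e.g. [2, 3, 7, 101]
assert longest_increasing_subsequence_length([]) == 0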
712
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __lowerCAmelCase ( lowerCamelCase : Any ): '''simple docstring''' __lowerCAmelCase = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2] __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: __lowerCAmelCase = [3, 3, 3, 3] __lowerCAmelCase = [5, 5, 5, 5] elif "fl4" in model_name: __lowerCAmelCase = [4, 4, 4, 4] __lowerCAmelCase = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: __lowerCAmelCase = [3, 3, 3, 3] if "lrf" in model_name: __lowerCAmelCase = [3, 3, 3, 3] else: __lowerCAmelCase = [2, 2, 2, 2] if "tiny" in model_name: __lowerCAmelCase = 96 elif "small" in model_name: __lowerCAmelCase = 96 elif "base" in model_name: __lowerCAmelCase = 1_28 elif "large" in model_name: __lowerCAmelCase = 1_92 elif "xlarge" in model_name: __lowerCAmelCase = 2_56 elif "huge" in model_name: __lowerCAmelCase = 3_52 # set label information __lowerCAmelCase = "huggingface/label-files" if "large" in model_name or "huge" in model_name: __lowerCAmelCase = "imagenet-22k-id2label.json" else: __lowerCAmelCase = "imagenet-1k-id2label.json" __lowerCAmelCase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="dataset" ) , "r" ) ) __lowerCAmelCase = {int(lowerCamelCase ): v for k, v in idalabel.items()} __lowerCAmelCase = {v: k for k, v in idalabel.items()} __lowerCAmelCase = FocalNetConfig( embed_dim=lowerCamelCase , depths=lowerCamelCase , focal_levels=lowerCamelCase , focal_windows=lowerCamelCase , use_conv_embed=lowerCamelCase , idalabel=lowerCamelCase , labelaid=lowerCamelCase , use_post_layernorm=lowerCamelCase , use_layerscale=lowerCamelCase , ) return config def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ): '''simple docstring''' if "patch_embed.proj" in name: __lowerCAmelCase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: __lowerCAmelCase = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: __lowerCAmelCase = "encoder." 
+ name if "encoder.layers" in name: __lowerCAmelCase = name.replace("encoder.layers" , "encoder.stages" ) if "downsample.proj" in name: __lowerCAmelCase = name.replace("downsample.proj" , "downsample.projection" ) if "blocks" in name: __lowerCAmelCase = name.replace("blocks" , "layers" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: __lowerCAmelCase = name.replace("modulation.f" , "modulation.projection_in" ) if "modulation.h.weight" in name or "modulation.h.bias" in name: __lowerCAmelCase = name.replace("modulation.h" , "modulation.projection_context" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: __lowerCAmelCase = name.replace("modulation.proj" , "modulation.projection_out" ) if name == "norm.weight": __lowerCAmelCase = "layernorm.weight" if name == "norm.bias": __lowerCAmelCase = "layernorm.bias" if "head" in name: __lowerCAmelCase = name.replace("head" , "classifier" ) else: __lowerCAmelCase = "focalnet." + name return name def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Union[str, Any]=False ): '''simple docstring''' __lowerCAmelCase = { "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth", "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth", "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth", "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth", "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth", "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth", "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth", "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth", "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth", "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth", } # fmt: on __lowerCAmelCase = model_name_to_url[model_name] print("Checkpoint URL: " , lowerCamelCase ) __lowerCAmelCase = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location="cpu" )["model"] # rename keys for key in state_dict.copy().keys(): __lowerCAmelCase = state_dict.pop(lowerCamelCase ) __lowerCAmelCase = val __lowerCAmelCase = get_focalnet_config(lowerCamelCase ) __lowerCAmelCase = FocalNetForImageClassification(lowerCamelCase ) model.eval() # load state dict model.load_state_dict(lowerCamelCase ) # verify conversion __lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowerCAmelCase = BitImageProcessor( do_resize=lowerCamelCase , size={"shortest_edge": 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase , crop_size=2_24 , do_normalize=lowerCamelCase , image_mean=lowerCamelCase , image_std=lowerCamelCase , ) __lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ) __lowerCAmelCase = processor(images=lowerCamelCase , return_tensors="pt" ) __lowerCAmelCase = transforms.Compose( [ transforms.Resize(2_56 ), transforms.CenterCrop(2_24 ), 
transforms.ToTensor(), transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) __lowerCAmelCase = image_transforms(lowerCamelCase ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , lowerCamelCase , atol=1e-4 ) __lowerCAmelCase = model(**lowerCamelCase ) __lowerCAmelCase = outputs.logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) print("First values of logits:" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": __lowerCAmelCase = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ) elif model_name == "focalnet-tiny-lrf": __lowerCAmelCase = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] ) elif model_name == "focalnet-small": __lowerCAmelCase = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] ) elif model_name == "focalnet-small-lrf": __lowerCAmelCase = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] ) elif model_name == "focalnet-base": __lowerCAmelCase = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] ) elif model_name == "focalnet-base-lrf": __lowerCAmelCase = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] ) assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase ) processor.save_pretrained(lowerCamelCase ) if push_to_hub: print(f'''Pushing model and processor of {model_name} to the hub...''' ) model.push_to_hub(f'''{model_name}''' ) processor.push_to_hub(f'''{model_name}''' ) if __name__ == "__main__": lowerCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''focalnet-tiny''', type=str, help='''Name of the FocalNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub.''', ) lowerCAmelCase : Optional[int] = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
39
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase : Dict = logging.get_logger(__name__) lowerCAmelCase : Optional[int] = { '''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''', # See all Dinat models at https://huggingface.co/models?filter=dinat } class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): a : List[Any] = """dinat""" a : Dict = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self , UpperCamelCase=4 , UpperCamelCase=3 , UpperCamelCase=64 , UpperCamelCase=[3, 4, 6, 5] , UpperCamelCase=[2, 4, 8, 16] , UpperCamelCase=7 , UpperCamelCase=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , UpperCamelCase=3.0 , UpperCamelCase=True , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.1 , UpperCamelCase="gelu" , UpperCamelCase=0.02 , UpperCamelCase=1E-5 , UpperCamelCase=0.0 , UpperCamelCase=None , UpperCamelCase=None , **UpperCamelCase , ) -> List[str]: super().__init__(**UpperCamelCase ) __lowerCAmelCase = patch_size __lowerCAmelCase = num_channels __lowerCAmelCase = embed_dim __lowerCAmelCase = depths __lowerCAmelCase = len(UpperCamelCase ) __lowerCAmelCase = num_heads __lowerCAmelCase = kernel_size __lowerCAmelCase = dilations __lowerCAmelCase = mlp_ratio __lowerCAmelCase = qkv_bias __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = drop_path_rate __lowerCAmelCase = hidden_act __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = initializer_range # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __lowerCAmelCase = int(embed_dim * 2 ** (len(UpperCamelCase ) - 1) ) __lowerCAmelCase = layer_scale_init_value __lowerCAmelCase = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(UpperCamelCase ) + 1 )] __lowerCAmelCase , __lowerCAmelCase = get_aligned_output_features_output_indices( out_features=UpperCamelCase , out_indices=UpperCamelCase , stage_names=self.stage_names )
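The defaults above reproduce the dinat-mini layout; a short sketch, assuming the mangled class name hides transformers' DinatConfig:

from transformers import DinatConfig, DinatModel

config = DinatConfig()      # defaults mirror shi-labs/dinat-mini-in1k-224
model = DinatModel(config)  # randomly initialized; use from_pretrained for weights
print(config.hidden_size)   # 512 == embed_dim * 2 ** (num_levels - 1) == 64 * 8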
713
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase : str = { '''vocab_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt''' ), '''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''', '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase : Optional[Any] = { '''squeezebert/squeezebert-uncased''': 5_1_2, '''squeezebert/squeezebert-mnli''': 5_1_2, '''squeezebert/squeezebert-mnli-headless''': 5_1_2, } lowerCAmelCase : Tuple = { '''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True}, } class UpperCAmelCase__ ( UpperCamelCase__ ): a : Dict = VOCAB_FILES_NAMES a : Any = PRETRAINED_VOCAB_FILES_MAP a : Dict = PRETRAINED_INIT_CONFIGURATION a : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a : Optional[Any] = SqueezeBertTokenizer def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase="[UNK]" , UpperCamelCase="[SEP]" , UpperCamelCase="[PAD]" , UpperCamelCase="[CLS]" , UpperCamelCase="[MASK]" , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ) -> List[Any]: super().__init__( UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , ) __lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , UpperCamelCase ) != do_lower_case or normalizer_state.get("strip_accents" , UpperCamelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCamelCase ) != tokenize_chinese_chars ): __lowerCAmelCase = getattr(UpperCamelCase , normalizer_state.pop("type" ) ) __lowerCAmelCase = do_lower_case __lowerCAmelCase = strip_accents __lowerCAmelCase = tokenize_chinese_chars __lowerCAmelCase = normalizer_class(**UpperCamelCase ) __lowerCAmelCase = do_lower_case def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=None ) -> str: __lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]: __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = 
[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> Tuple[str]: __lowerCAmelCase = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase ) return tuple(UpperCamelCase )
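Assuming the class above is transformers' SqueezeBertTokenizerFast (the mangled name hides it), the special-token and token-type logic it builds looks like this in use:

from transformers import SqueezeBertTokenizerFast

tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
enc = tokenizer("first segment", "second segment")
print(enc["input_ids"])       # [CLS] first ... [SEP] second ... [SEP]
print(enc["token_type_ids"])  # 0 for [CLS]+first+[SEP], 1 for second+[SEP]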
39
0
'''simple docstring'''
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
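A sanity check with a closed-form answer: the segment y = x from x = 0 to x = 1 has length sqrt(2), which the piecewise-linear sum recovers almost exactly:

import math

assert abs(line_length(lambda x: x, 0, 1, 1000) - math.sqrt(2)) < 1e-6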
714
'''simple docstring'''
from __future__ import annotations


def mean(nums: list) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
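The __main__ block runs doctest but the stripped docstring above carries no examples; assuming the function is named mean, as its body suggests, usage is:

assert mean([3, 6, 9, 12, 15, 18, 21]) == 12.0
assert mean([5, 10, 15, 20, 25]) == 15.0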
39
0
'''simple docstring'''
import re

from filelock import FileLock

try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char (keep the substituted result)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
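In use (the split relies on the punkt model downloaded above; the sample text is hypothetical):

text = "Pegasus is mighty. It flies high."
print(add_newline_to_end_of_each_sentence(text))
# Pegasus is mighty.
# It flies high.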
715
'''simple docstring'''
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"
        r"7(0|1|2|4|5|6|7|8)"
        r"(-| |)"
        r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
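A few spot checks of the pattern (the numbers are made up but follow the 07x / +947x formats the regex encodes):

assert is_sri_lankan_phone_number("0094702343221")
assert is_sri_lankan_phone_number("+94771234567")
assert is_sri_lankan_phone_number("071-1234567")
assert not is_sri_lankan_phone_number("12345")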
39
0
'''simple docstring''' import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowerCAmelCase : List[Any] = logging.getLogger(__name__) def __lowerCAmelCase ( ): '''simple docstring''' __lowerCAmelCase = argparse.ArgumentParser( description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." ) parser.add_argument( "--dataset_name" , type=lowerCamelCase , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , ) parser.add_argument( "--dataset_config" , type=lowerCamelCase , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." ) parser.add_argument( "--tokenizer_name_or_path" , type=lowerCamelCase , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , ) parser.add_argument( "--shard_size" , type=lowerCamelCase , default=10_00 , help="Number of entries to go in a single shard." , ) parser.add_argument("--split" , type=lowerCamelCase , default="train" , choices=["train", "test", "validation"] ) parser.add_argument( "--limit" , default=lowerCamelCase , type=lowerCamelCase , help="Limit the number of shards (used for debugging)." , ) parser.add_argument( "--max_length" , type=lowerCamelCase , default=5_12 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum" " sequence length that is a multiple of 8." , ) parser.add_argument( "--output_dir" , default="tf-tpu" , type=lowerCamelCase , help="Output directory where the TFRecord shards will be saved. If the" " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord" " shards will be directly saved to a Google Cloud Storage bucket." , ) __lowerCAmelCase = parser.parse_args() return args def __lowerCAmelCase ( lowerCamelCase : int ): '''simple docstring''' def fn(lowerCamelCase : Optional[Any] ): return tokenizer(examples["text"] ) return fn def __lowerCAmelCase ( lowerCamelCase : List[str] ): '''simple docstring''' __lowerCAmelCase = [] for i in range(len(tokenized_data["input_ids"] ) ): __lowerCAmelCase = { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ), } __lowerCAmelCase = tf.train.Features(feature=lowerCamelCase ) __lowerCAmelCase = tf.train.Example(features=lowerCamelCase ) __lowerCAmelCase = example.SerializeToString() records.append(lowerCamelCase ) return records def __lowerCAmelCase ( lowerCamelCase : Optional[Any] ): '''simple docstring''' __lowerCAmelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: __lowerCAmelCase = min(len(lowerCamelCase ) , args.limit ) __lowerCAmelCase = dataset.select(range(lowerCamelCase ) ) print(f'''Limiting the dataset to {args.limit} entries.''' ) __lowerCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) __lowerCAmelCase = os.path.join(args.output_dir , args.split ) if not os.path.exists(lowerCamelCase ): os.makedirs(lowerCamelCase ) else: __lowerCAmelCase = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. 
__lowerCAmelCase = tokenize_function(lowerCamelCase ) __lowerCAmelCase = dataset.map(lowerCamelCase , batched=lowerCamelCase , num_proc=4 , remove_columns=["text"] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(lowerCamelCase : Optional[Any] ): # Concatenate all texts. __lowerCAmelCase = {k: sum(examples[k] , [] ) for k in examples.keys()} __lowerCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 __lowerCAmelCase = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. __lowerCAmelCase = { k: [t[i : i + args.max_length] for i in range(0 , lowerCamelCase , args.max_length )] for k, t in concatenated_examples.items() } return result __lowerCAmelCase = dataset_tokenized.map(lowerCamelCase , batched=lowerCamelCase , batch_size=10_00 , num_proc=4 ) __lowerCAmelCase = 0 __lowerCAmelCase = 0 for shard in range(0 , len(lowerCamelCase ) , args.shard_size ): __lowerCAmelCase = grouped_dataset[shard : shard + args.shard_size] __lowerCAmelCase = len(dataset_snapshot["input_ids"] ) __lowerCAmelCase = os.path.join(lowerCamelCase , f'''dataset-{shard_count}-{records_containing}.tfrecord''' ) __lowerCAmelCase = get_serialized_examples(lowerCamelCase ) with tf.io.TFRecordWriter(lowerCamelCase ) as out_file: for i in range(len(lowerCamelCase ) ): __lowerCAmelCase = serialized_examples[i] out_file.write(lowerCamelCase ) print("Wrote file {} containing {} records".format(lowerCamelCase , lowerCamelCase ) ) shard_count += 1 total_records += records_containing with open(f'''split-{args.split}-records-count.txt''' , "w" ) as f: print(f'''Total {args.split} records: {total_records}''' , file=lowerCamelCase ) if __name__ == "__main__": lowerCAmelCase : Optional[Any] = parse_args() main(args)
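The shards the script writes can be read back with tf.data; a hedged sketch of the decode side (the feature spec mirrors what the serialization above writes, and the glob path assumes the default output_dir and the train split):

import tensorflow as tf

feature_spec = {
    "input_ids": tf.io.VarLenFeature(tf.int64),
    "attention_mask": tf.io.VarLenFeature(tf.int64),
}

def decode(record_bytes):
    parsed = tf.io.parse_single_example(record_bytes, feature_spec)
    return {k: tf.sparse.to_dense(v) for k, v in parsed.items()}

ds = tf.data.TFRecordDataset(tf.io.gfile.glob("tf-tpu/train/*.tfrecord")).map(decode)
for example in ds.take(1):
    print(example["input_ids"].shape)  # (max_length,)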
716
'''simple docstring''' import os import sys import unittest lowerCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowerCAmelCase : Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''') lowerCAmelCase : Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''') class UpperCAmelCase__ ( unittest.TestCase ): def UpperCAmelCase_ ( self ) -> str: __lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase ) __lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase ) __lowerCAmelCase = {"BertModelTest": "BertModelTester"} __lowerCAmelCase = { "BlipModelTest": "BlipModelTester", "BlipTextImageModelTest": "BlipTextImageModelsModelTester", "BlipTextModelTest": "BlipTextModelTester", "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester", "BlipVQAModelTest": "BlipVQAModelTester", "BlipVisionModelTest": "BlipVisionModelTester", } self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) def UpperCAmelCase_ ( self ) -> List[Any]: __lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase ) __lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase ) __lowerCAmelCase = { "BertForMaskedLM": ["BertModelTest"], "BertForMultipleChoice": ["BertModelTest"], "BertForNextSentencePrediction": ["BertModelTest"], "BertForPreTraining": ["BertModelTest"], "BertForQuestionAnswering": ["BertModelTest"], "BertForSequenceClassification": ["BertModelTest"], "BertForTokenClassification": ["BertModelTest"], "BertLMHeadModel": ["BertModelTest"], "BertModel": ["BertModelTest"], } __lowerCAmelCase = { "BlipForConditionalGeneration": ["BlipTextImageModelTest"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"], "BlipForQuestionAnswering": ["BlipVQAModelTest"], "BlipModel": ["BlipModelTest"], "BlipTextModel": ["BlipTextModelTest"], "BlipVisionModel": ["BlipVisionModelTest"], } self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) def UpperCAmelCase_ ( self ) -> str: __lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase ) __lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase ) __lowerCAmelCase = { "BertForMaskedLM": ["BertModelTester"], "BertForMultipleChoice": ["BertModelTester"], "BertForNextSentencePrediction": ["BertModelTester"], "BertForPreTraining": ["BertModelTester"], "BertForQuestionAnswering": ["BertModelTester"], "BertForSequenceClassification": ["BertModelTester"], "BertForTokenClassification": ["BertModelTester"], "BertLMHeadModel": ["BertModelTester"], "BertModel": ["BertModelTester"], } __lowerCAmelCase = { "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"], "BlipForQuestionAnswering": ["BlipVQAModelTester"], "BlipModel": ["BlipModelTester"], "BlipTextModel": ["BlipTextModelTester"], "BlipVisionModel": ["BlipVisionModelTester"], } self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
39
0
'''simple docstring'''

import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig


lowerCAmelCase : Any = logging.get_logger(__name__)

lowerCAmelCase : Optional[int] = {
    '''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class UpperCAmelCase__ ( UpperCamelCase__ ):
    a : str = """dpt"""

    def __init__( self , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=12 , UpperCamelCase=3072 , UpperCamelCase="gelu" , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.02 , UpperCamelCase=1E-12 , UpperCamelCase=384 , UpperCamelCase=16 , UpperCamelCase=3 , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase=[2, 5, 8, 11] , UpperCamelCase="project" , UpperCamelCase=[4, 2, 1, 0.5] , UpperCamelCase=[96, 192, 384, 768] , UpperCamelCase=256 , UpperCamelCase=-1 , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase=0.4 , UpperCamelCase=255 , UpperCamelCase=0.1 , UpperCamelCase=[1, 1024, 24, 24] , UpperCamelCase=[0, 1] , UpperCamelCase=None , **UpperCamelCase , ) -> Optional[Any]:
        super().__init__(**UpperCamelCase )

        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone." )
                __lowerCAmelCase = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                __lowerCAmelCase = BitConfig(**UpperCamelCase )
            elif isinstance(UpperCamelCase , UpperCamelCase ):
                logger.info("Initializing the config with a `BiT` backbone." )
                __lowerCAmelCase = BitConfig(**UpperCamelCase )
            elif isinstance(UpperCamelCase , UpperCamelCase ):
                __lowerCAmelCase = backbone_config
            else:
                raise ValueError(
                    F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.'''
                )
            __lowerCAmelCase = backbone_featmap_shape
            __lowerCAmelCase = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." )
        else:
            __lowerCAmelCase = None
            __lowerCAmelCase = None
            __lowerCAmelCase = []

        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = layer_norm_eps
        __lowerCAmelCase = image_size
        __lowerCAmelCase = patch_size
        __lowerCAmelCase = num_channels
        __lowerCAmelCase = qkv_bias
        __lowerCAmelCase = backbone_out_indices

        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']" )
        __lowerCAmelCase = readout_type
        __lowerCAmelCase = reassemble_factors
        __lowerCAmelCase = neck_hidden_sizes
        __lowerCAmelCase = fusion_hidden_size
        __lowerCAmelCase = head_in_index
        __lowerCAmelCase = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        __lowerCAmelCase = use_auxiliary_head
        __lowerCAmelCase = auxiliary_loss_weight
        __lowerCAmelCase = semantic_loss_ignore_index
        __lowerCAmelCase = semantic_classifier_dropout

    def UpperCAmelCase_ ( self ) -> Any:
        __lowerCAmelCase = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            __lowerCAmelCase = self.backbone_config.to_dict()
        __lowerCAmelCase = self.__class__.model_type
        return output
717
'''simple docstring'''

from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class UpperCAmelCase__ ( UpperCamelCase__ ):
    a : torch.FloatTensor


class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
    @register_to_config
    def __init__( self , UpperCamelCase = 16 , UpperCamelCase = 88 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = 1 , UpperCamelCase = 0.0 , UpperCamelCase = 32 , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = "geglu" , UpperCamelCase = True , UpperCamelCase = True , ) -> List[str]:
        super().__init__()
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = attention_head_dim
        __lowerCAmelCase = num_attention_heads * attention_head_dim

        __lowerCAmelCase = in_channels

        __lowerCAmelCase = torch.nn.GroupNorm(num_groups=UpperCamelCase , num_channels=UpperCamelCase , eps=1E-6 , affine=UpperCamelCase )
        __lowerCAmelCase = nn.Linear(UpperCamelCase , UpperCamelCase )

        # 3. Define transformers blocks
        __lowerCAmelCase = nn.ModuleList(
            [
                BasicTransformerBlock(
                    UpperCamelCase ,
                    UpperCamelCase ,
                    UpperCamelCase ,
                    dropout=UpperCamelCase ,
                    cross_attention_dim=UpperCamelCase ,
                    activation_fn=UpperCamelCase ,
                    attention_bias=UpperCamelCase ,
                    double_self_attention=UpperCamelCase ,
                    norm_elementwise_affine=UpperCamelCase ,
                )
                for d in range(UpperCamelCase )
            ]
        )

        __lowerCAmelCase = nn.Linear(UpperCamelCase , UpperCamelCase )

    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=1 , UpperCamelCase=None , UpperCamelCase = True , ) -> List[str]:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = hidden_states.shape
        __lowerCAmelCase = batch_frames // num_frames

        __lowerCAmelCase = hidden_states
        __lowerCAmelCase = hidden_states[None, :].reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
        __lowerCAmelCase = hidden_states.permute(0 , 2 , 1 , 3 , 4 )

        __lowerCAmelCase = self.norm(UpperCamelCase )
        __lowerCAmelCase = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , UpperCamelCase , UpperCamelCase )

        __lowerCAmelCase = self.proj_in(UpperCamelCase )

        # 2. Blocks
        for block in self.transformer_blocks:
            __lowerCAmelCase = block(
                UpperCamelCase ,
                encoder_hidden_states=UpperCamelCase ,
                timestep=UpperCamelCase ,
                cross_attention_kwargs=UpperCamelCase ,
                class_labels=UpperCamelCase ,
            )

        # 3. Output
        __lowerCAmelCase = self.proj_out(UpperCamelCase )
        __lowerCAmelCase = (
            hidden_states[None, None, :]
            .reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        __lowerCAmelCase = hidden_states.reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )

        __lowerCAmelCase = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=UpperCamelCase )
39
0
'''simple docstring'''

import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ):
    a : int = CpmAntTokenizer
    a : List[str] = False

    def UpperCAmelCase_ ( self ) -> Tuple:
        super().setUp()
        __lowerCAmelCase = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    @tooslow
    def UpperCAmelCase_ ( self ) -> str:
        __lowerCAmelCase = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b" )
        __lowerCAmelCase = "今天天气真好!"
        __lowerCAmelCase = ["今天", "天气", "真", "好", "!"]
        __lowerCAmelCase = tokenizer.tokenize(UpperCamelCase )
        self.assertListEqual(UpperCamelCase , UpperCamelCase )

        __lowerCAmelCase = "今天天气真好!"
        __lowerCAmelCase = [tokenizer.bos_token] + tokens

        __lowerCAmelCase = [6, 9802, 1_4962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )

        __lowerCAmelCase = tokenizer.decode(UpperCamelCase )
        self.assertEqual(UpperCamelCase , UpperCamelCase )
718
'''simple docstring'''

import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def __lowerCAmelCase ( lowerCamelCase : bytes , lowerCamelCase : int ):
    '''simple docstring'''
    __lowerCAmelCase = f'''{sampling_rate}'''
    __lowerCAmelCase = "1"
    __lowerCAmelCase = "f32le"
    __lowerCAmelCase = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(lowerCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            __lowerCAmelCase = ffmpeg_process.communicate(lowerCamelCase )
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
    __lowerCAmelCase = output_stream[0]
    __lowerCAmelCase = np.frombuffer(lowerCamelCase , np.floataa )
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile" )
    return audio


def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : str = "f32le" , ):
    '''simple docstring'''
    __lowerCAmelCase = f'''{sampling_rate}'''
    __lowerCAmelCase = "1"
    if format_for_conversion == "s16le":
        __lowerCAmelCase = 2
    elif format_for_conversion == "f32le":
        __lowerCAmelCase = 4
    else:
        raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )

    __lowerCAmelCase = platform.system()
    if system == "Linux":
        __lowerCAmelCase = "alsa"
        __lowerCAmelCase = "default"
    elif system == "Darwin":
        __lowerCAmelCase = "avfoundation"
        __lowerCAmelCase = ":0"
    elif system == "Windows":
        __lowerCAmelCase = "dshow"
        __lowerCAmelCase = "default"

    __lowerCAmelCase = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    __lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    __lowerCAmelCase = _ffmpeg_stream(lowerCamelCase , lowerCamelCase )
    for item in iterator:
        yield item


def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Union[Tuple[float, float], float]] = None , lowerCamelCase : str = "f32le" , ):
    '''simple docstring'''
    if stream_chunk_s is not None:
        __lowerCAmelCase = stream_chunk_s
    else:
        __lowerCAmelCase = chunk_length_s

    __lowerCAmelCase = ffmpeg_microphone(lowerCamelCase , lowerCamelCase , format_for_conversion=lowerCamelCase )
    if format_for_conversion == "s16le":
        __lowerCAmelCase = np.intaa
        __lowerCAmelCase = 2
    elif format_for_conversion == "f32le":
        __lowerCAmelCase = np.floataa
        __lowerCAmelCase = 4
    else:
        raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )

    if stride_length_s is None:
        __lowerCAmelCase = chunk_length_s / 6
    __lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(lowerCamelCase , (int, float) ):
        __lowerCAmelCase = [stride_length_s, stride_length_s]

    __lowerCAmelCase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    __lowerCAmelCase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    __lowerCAmelCase = datetime.datetime.now()
    __lowerCAmelCase = datetime.timedelta(seconds=lowerCamelCase )
    for item in chunk_bytes_iter(lowerCamelCase , lowerCamelCase , stride=(stride_left, stride_right) , stream=lowerCamelCase ):
        # Put everything back in numpy scale
        __lowerCAmelCase = np.frombuffer(item["raw"] , dtype=lowerCamelCase )
        __lowerCAmelCase = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        __lowerCAmelCase = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def __lowerCAmelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Tuple[int, int] , lowerCamelCase : bool = False ):
    '''simple docstring'''
    __lowerCAmelCase = B""
    __lowerCAmelCase , __lowerCAmelCase = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}'''
        )
    __lowerCAmelCase = 0
    for raw in iterator:
        acc += raw
        if stream and len(lowerCamelCase ) < chunk_len:
            __lowerCAmelCase = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(lowerCamelCase ) >= chunk_len:
                # We are flushing the accumulator
                __lowerCAmelCase = (_stride_left, stride_right)
                __lowerCAmelCase = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    __lowerCAmelCase = False
                yield item
                __lowerCAmelCase = stride_left
                __lowerCAmelCase = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(lowerCamelCase ) > stride_left:
        __lowerCAmelCase = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            __lowerCAmelCase = False
        yield item


def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int ):
    '''simple docstring'''
    __lowerCAmelCase = 2**24  # 16Mo
    try:
        with subprocess.Popen(lowerCamelCase , stdout=subprocess.PIPE , bufsize=lowerCamelCase ) as ffmpeg_process:
            while True:
                __lowerCAmelCase = ffmpeg_process.stdout.read(lowerCamelCase )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
39
0
'''simple docstring'''

import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


lowerCAmelCase : Tuple = logging.get_logger(__name__)


class UpperCAmelCase__ ( UpperCamelCase__ ):
    def __init__( self , *UpperCamelCase , **UpperCamelCase ) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead." ,
            UpperCamelCase ,
        )
        super().__init__(*UpperCamelCase , **UpperCamelCase )
719
'''simple docstring'''

from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def __lowerCAmelCase ( lowerCamelCase : List[str] ):
    '''simple docstring'''
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )


class UpperCAmelCase__ ( UpperCamelCase__ ):
    @staticmethod
    def UpperCAmelCase_ ( UpperCamelCase ) -> Tuple:
        __lowerCAmelCase = parser.add_parser("download" )
        download_parser.add_argument(
            "--cache-dir" , type=UpperCamelCase , default=UpperCamelCase , help="Path to location to store the models" )
        download_parser.add_argument(
            "--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" )
        download_parser.add_argument(
            "--trust-remote-code" ,
            action="store_true" ,
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" ,
        )
        download_parser.add_argument("model" , type=UpperCamelCase , help="Name of the model to download" )
        download_parser.set_defaults(func=UpperCamelCase )

    def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]:
        __lowerCAmelCase = model
        __lowerCAmelCase = cache
        __lowerCAmelCase = force
        __lowerCAmelCase = trust_remote_code

    def UpperCAmelCase_ ( self ) -> Any:
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
39
0
'''simple docstring'''


def __lowerCAmelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] ):
    '''simple docstring'''
    __lowerCAmelCase = (boundary[1] - boundary[0]) / steps
    __lowerCAmelCase = boundary[0]
    __lowerCAmelCase = boundary[1]
    __lowerCAmelCase = make_points(lowerCamelCase , lowerCamelCase , lowerCamelCase )
    __lowerCAmelCase = 0.0
    y += (h / 2.0) * f(lowerCamelCase )
    for i in x_i:
        # print(i)
        y += h * f(lowerCamelCase )
    y += (h / 2.0) * f(lowerCamelCase )
    return y


def __lowerCAmelCase ( lowerCamelCase : List[str] , lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] ):
    '''simple docstring'''
    __lowerCAmelCase = a + h
    while x < (b - h):
        yield x
        __lowerCAmelCase = x + h


def __lowerCAmelCase ( lowerCamelCase : int ):  # enter your function here
    '''simple docstring'''
    __lowerCAmelCase = (x - 0) * (x - 0)
    return y


def __lowerCAmelCase ( ):
    '''simple docstring'''
    __lowerCAmelCase = 0.0  # Lower bound of integration
    __lowerCAmelCase = 1.0  # Upper bound of integration
    __lowerCAmelCase = 10.0  # define number of steps or resolution
    __lowerCAmelCase = [a, b]  # define boundary of integration
    __lowerCAmelCase = method_a(lowerCamelCase , lowerCamelCase )
    print(f'''y = {y}''' )


if __name__ == "__main__":
    main()
720
'''simple docstring'''


def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ):
    '''simple docstring'''
    __lowerCAmelCase = 1
    __lowerCAmelCase = 2
    while i * i <= n:
        __lowerCAmelCase = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def __lowerCAmelCase ( ):
    '''simple docstring'''
    __lowerCAmelCase = 1
    __lowerCAmelCase = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(lowerCamelCase ) > 5_00:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
39
0
'''simple docstring'''

from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def __lowerCAmelCase ( lowerCamelCase : str = "isbn/0140328726" ):
    '''simple docstring'''
    __lowerCAmelCase = olid.strip().strip("/" )  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/" ) != 1:
        __lowerCAmelCase = f'''{olid} is not a valid Open Library olid'''
        raise ValueError(lowerCamelCase )
    return requests.get(f'''https://openlibrary.org/{new_olid}.json''' ).json()


def __lowerCAmelCase ( lowerCamelCase : dict ):
    '''simple docstring'''
    __lowerCAmelCase = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    __lowerCAmelCase = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    __lowerCAmelCase = [
        get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
    ]
    __lowerCAmelCase = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(lowerCamelCase , lowerCamelCase ):
            __lowerCAmelCase = ", ".join(lowerCamelCase )
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        lowerCAmelCase : Optional[Any] = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
            print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
            continue
        print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
        try:
            lowerCAmelCase : Tuple = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
            print('''\n'''.join(f'''{key}: {value}''' for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f'''Sorry, there are no results for ISBN: {isbn}.''')
721
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)

lowerCAmelCase : Optional[int] = {
    '''facebook/dpr-ctx_encoder-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-question_encoder-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-reader-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-ctx_encoder-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-question_encoder-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-reader-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
    ),
}


class UpperCAmelCase__ ( UpperCamelCase__ ):
    a : Optional[Any] = """dpr"""

    def __init__( self , UpperCamelCase=3_0522 , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=12 , UpperCamelCase=3072 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=1E-12 , UpperCamelCase=0 , UpperCamelCase="absolute" , UpperCamelCase = 0 , **UpperCamelCase , ) -> Tuple:
        super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase )

        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = type_vocab_size
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = layer_norm_eps
        __lowerCAmelCase = projection_dim
        __lowerCAmelCase = position_embedding_type
39
0
'''simple docstring'''

from itertools import count


def __lowerCAmelCase (__lowerCAmelCase = 50 ):
    _UpperCAmelCase : int = [1] * min_block_length
    for n in count(__lowerCAmelCase ):
        fill_count_functions.append(1 )

        for block_length in range(__lowerCAmelCase , n + 1 ):
            for block_start in range(n - block_length ):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(F'''{solution() = }''')
40
'''simple docstring'''

from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None ):
    if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
        # old versions of hfh don't url-encode the file path
        _UpperCAmelCase : str = quote(__lowerCAmelCase )
    return hfh.hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" , revision=__lowerCAmelCase )
40
1