code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
"""simple docstring""" import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class lowerCamelCase__ : """simple docstring""" def __init__( self : List[str] , UpperCamelCase : str = "cpu" , UpperCamelCase : str = "openai/clip-vit-large-patch14" ): '''simple docstring''' __UpperCAmelCase : int = device __UpperCAmelCase : str = CLIPTokenizerFast.from_pretrained(snake_case__ ) __UpperCAmelCase : List[Any] = [0.48145466, 0.4578275, 0.40821073] __UpperCAmelCase : Optional[int] = [0.26862954, 0.26130258, 0.27577711] __UpperCAmelCase : List[Any] = torchvision.transforms.Normalize(self.image_mean , self.image_std ) __UpperCAmelCase : List[str] = torchvision.transforms.Resize(224 ) __UpperCAmelCase : List[Any] = torchvision.transforms.CenterCrop(224 ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[str] = self.resize(snake_case__ ) __UpperCAmelCase : Union[str, Any] = self.center_crop(snake_case__ ) __UpperCAmelCase : str = self.normalize(snake_case__ ) return images def __call__( self : Optional[int] , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[Any]=None , **UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Dict = self.tokenizer(text=snake_case__ , **snake_case__ ) __UpperCAmelCase : str = self.preprocess_img(snake_case__ ) __UpperCAmelCase : Any = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : int , UpperCamelCase : Tuple=10 , UpperCamelCase : List[str]=0.01 , UpperCamelCase : Dict=None , UpperCamelCase : List[str]=None , UpperCamelCase : 
Dict=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Dict=None , UpperCamelCase : Any=False , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Any="image" , UpperCamelCase : str=True , UpperCamelCase : Any=False , UpperCamelCase : List[str]=False , UpperCamelCase : Optional[Any]=False , ): '''simple docstring''' super().__init__() __UpperCAmelCase : List[str] = None __UpperCAmelCase : Optional[int] = device if device else get_device() if vqgan: __UpperCAmelCase : List[Any] = vqgan else: __UpperCAmelCase : Tuple = load_vqgan(self.device , conf_path=snake_case__ , ckpt_path=snake_case__ ) self.vqgan.eval() if clip: __UpperCAmelCase : Any = clip else: __UpperCAmelCase : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" ) self.clip.to(self.device ) __UpperCAmelCase : Optional[Any] = ProcessorGradientFlow(device=self.device ) __UpperCAmelCase : Optional[int] = iterations __UpperCAmelCase : str = lr __UpperCAmelCase : List[Any] = log __UpperCAmelCase : Optional[int] = make_grid __UpperCAmelCase : str = return_val __UpperCAmelCase : List[Any] = quantize __UpperCAmelCase : List[str] = self.vqgan.decoder.z_shape def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict=None , UpperCamelCase : Tuple=None , UpperCamelCase : Any=5 , UpperCamelCase : Optional[Any]=True ): '''simple docstring''' __UpperCAmelCase : List[Any] = [] if output_path is None: __UpperCAmelCase : List[Any] = "./animation.gif" if input_path is None: __UpperCAmelCase : Any = self.save_path __UpperCAmelCase : int = sorted(glob(input_path + """/*""" ) ) if not len(snake_case__ ): raise ValueError( """No images found in save path, aborting (did you pass save_intermediate=True to the generate""" """ function?)""" ) if len(snake_case__ ) == 1: print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" ) __UpperCAmelCase : List[Any] = total_duration / 
len(snake_case__ ) __UpperCAmelCase : Union[str, Any] = [frame_duration] * len(snake_case__ ) if extend_frames: __UpperCAmelCase : Union[str, Any] = 1.5 __UpperCAmelCase : int = 3 for file_name in paths: if file_name.endswith(""".png""" ): images.append(imageio.imread(snake_case__ ) ) imageio.mimsave(snake_case__ , snake_case__ , duration=snake_case__ ) print(f'''gif saved to {output_path}''' ) def lowerCamelCase__ ( self : str , UpperCamelCase : str=None , UpperCamelCase : List[Any]=None ): '''simple docstring''' if not (path or img): raise ValueError("""Input either path or tensor""" ) if img is not None: raise NotImplementedError __UpperCAmelCase : Optional[Any] = preprocess(Image.open(snake_case__ ) , target_image_size=256 ).to(self.device ) __UpperCAmelCase : Any = preprocess_vqgan(snake_case__ ) __UpperCAmelCase : str = self.vqgan.encode(snake_case__ ) return z def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Dict = self.latent.detach().requires_grad_() __UpperCAmelCase : Optional[int] = base_latent + transform_vector if self.quantize: __UpperCAmelCase : Any = self.vqgan.quantize(snake_case__ ) else: __UpperCAmelCase : Union[str, Any] = trans_latent return self.vqgan.decode(snake_case__ ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int]=None ): '''simple docstring''' __UpperCAmelCase : Any = self.clip_preprocessor(text=snake_case__ , images=snake_case__ , return_tensors="""pt""" , padding=snake_case__ ) __UpperCAmelCase : Optional[int] = self.clip(**snake_case__ ) __UpperCAmelCase : List[Any] = clip_outputs.logits_per_image if weights is not None: __UpperCAmelCase : Any = similarity_logits * weights return similarity_logits.sum() def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict ): '''simple docstring''' __UpperCAmelCase : Any = 
self._get_clip_similarity(pos_prompts["""prompts"""] , snake_case__ , weights=(1 / pos_prompts["""weights"""]) ) if neg_prompts: __UpperCAmelCase : Union[str, Any] = self._get_clip_similarity(neg_prompts["""prompts"""] , snake_case__ , weights=neg_prompts["""weights"""] ) else: __UpperCAmelCase : Union[str, Any] = torch.tensor([1] , device=self.device ) __UpperCAmelCase : List[Any] = -torch.log(snake_case__ ) + torch.log(snake_case__ ) return loss def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = torch.randn_like(self.latent , requires_grad=snake_case__ , device=self.device ) __UpperCAmelCase : Optional[int] = torch.optim.Adam([vector] , lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() __UpperCAmelCase : Dict = self._add_vector(snake_case__ ) __UpperCAmelCase : Union[str, Any] = loop_post_process(snake_case__ ) __UpperCAmelCase : List[str] = self._get_CLIP_loss(snake_case__ , snake_case__ , snake_case__ ) print("""CLIP loss""" , snake_case__ ) if self.log: wandb.log({"""CLIP Loss""": clip_loss} ) clip_loss.backward(retain_graph=snake_case__ ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] ): '''simple docstring''' wandb.init(reinit=snake_case__ , project="""face-editor""" ) wandb.config.update({"""Positive Prompts""": positive_prompts} ) wandb.config.update({"""Negative Prompts""": negative_prompts} ) wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} ) if image_path: __UpperCAmelCase : Optional[Any] = Image.open(snake_case__ ) __UpperCAmelCase : Dict = image.resize((256, 256) ) wandb.log("""Original Image""" , wandb.Image(snake_case__ ) ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : 
Union[str, Any] ): '''simple docstring''' if not prompts: return [] __UpperCAmelCase : Tuple = [] __UpperCAmelCase : List[str] = [] if isinstance(snake_case__ , snake_case__ ): __UpperCAmelCase : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )] for prompt in prompts: if isinstance(snake_case__ , (tuple, list) ): __UpperCAmelCase : Tuple = prompt[0] __UpperCAmelCase : Optional[int] = float(prompt[1] ) elif ":" in prompt: __UpperCAmelCase : Optional[int] = prompt.split(""":""" ) __UpperCAmelCase : List[str] = float(snake_case__ ) else: __UpperCAmelCase : Optional[Any] = prompt __UpperCAmelCase : Tuple = 1.0 processed_prompts.append(snake_case__ ) weights.append(snake_case__ ) return { "prompts": processed_prompts, "weights": torch.tensor(snake_case__ , device=self.device ), } def lowerCamelCase__ ( self : Dict , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : str=None , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Any=False , UpperCamelCase : List[Any]=True , UpperCamelCase : List[Any]=True , UpperCamelCase : Dict=None , ): '''simple docstring''' if image_path: __UpperCAmelCase : List[Any] = self._get_latent(snake_case__ ) else: __UpperCAmelCase : Optional[int] = torch.randn(self.latent_dim , device=self.device ) if self.log: self._init_logging(snake_case__ , snake_case__ , snake_case__ ) assert pos_prompts, "You must provide at least one positive prompt." 
__UpperCAmelCase : Optional[Any] = self.process_prompts(snake_case__ ) __UpperCAmelCase : List[Any] = self.process_prompts(snake_case__ ) if save_final and save_path is None: __UpperCAmelCase : Tuple = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) ) if not os.path.exists(snake_case__ ): os.makedirs(snake_case__ ) else: __UpperCAmelCase : int = save_path + "_" + get_timestamp() os.makedirs(snake_case__ ) __UpperCAmelCase : Tuple = save_path __UpperCAmelCase : str = self.vqgan.decode(self.latent )[0] if show_intermediate: print("""Original Image""" ) show_pil(custom_to_pil(snake_case__ ) ) __UpperCAmelCase : List[str] = loop_post_process(snake_case__ ) for iter, transformed_img in enumerate(self._optimize_CLIP(snake_case__ , snake_case__ , snake_case__ ) ): if show_intermediate: show_pil(snake_case__ ) if save_intermediate: transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''' ) ) if self.log: wandb.log({"""Image""": wandb.Image(snake_case__ )} ) if show_final: show_pil(snake_case__ ) if save_final: transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png''' ) )
115
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCamelCase = { """configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""], """tokenization_biogpt""": ["""BioGptTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = [ """BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BioGptForCausalLM""", """BioGptForTokenClassification""", """BioGptForSequenceClassification""", """BioGptModel""", """BioGptPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys __lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
59
0
'''simple docstring''' from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand __lowerCAmelCase : Dict =logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCamelCase ( _lowerCamelCase : str ): if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(__lowerCamelCase ): return ext raise Exception( F"Unable to determine file format from file extension {path}. " F"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" ) def UpperCamelCase ( _lowerCamelCase : Tuple ): A__ = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) A__ = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format A__ = PipelineDataFormat.from_str( format=__lowerCamelCase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(__lowerCamelCase , __lowerCamelCase ) class UpperCAmelCase ( A_ ): def __init__( self :str , lowercase_ :Pipeline , lowercase_ :PipelineDataFormat )-> Dict: A__ = nlp A__ = reader @staticmethod def UpperCAmelCase_ ( lowercase_ :ArgumentParser )-> Optional[Any]: A__ = parser.add_parser("run" , help="Run a pipeline through the CLI" ) run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" ) run_parser.add_argument("--input" , type=snake_case__ , help="Path to the file to use for inference" ) run_parser.add_argument("--output" , type=snake_case__ , help="Path to the file that will be used post to write results." ) run_parser.add_argument("--model" , type=snake_case__ , help="Name or path to the model to instantiate." 
) run_parser.add_argument("--config" , type=snake_case__ , help="Name or path to the model's config to instantiate." ) run_parser.add_argument( "--tokenizer" , type=snake_case__ , help="Name of the tokenizer to use. (default: same as the model name)" ) run_parser.add_argument( "--column" , type=snake_case__ , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , ) run_parser.add_argument( "--format" , type=snake_case__ , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , ) run_parser.add_argument( "--device" , type=snake_case__ , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , ) run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." ) run_parser.set_defaults(func=snake_case__ ) def UpperCAmelCase_ ( self :Any )-> Optional[Any]: A__ = self._nlp, [] for entry in self._reader: A__ = nlp(**snake_case__ ) if self._reader.is_multi_columns else nlp(snake_case__ ) if isinstance(snake_case__ , snake_case__ ): outputs.append(snake_case__ ) else: outputs += output # Saving data if self._nlp.binary_output: A__ = self._reader.save_binary(snake_case__ ) logger.warning(F"Current pipeline requires output to be in binary format, saving at {binary_path}" ) else: self._reader.save(snake_case__ )
237
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase : def __init__(self : Dict , snake_case__ : Dict , snake_case__ : Any=13 , snake_case__ : Any=32 , snake_case__ : Optional[Any]=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : List[Any]=16 , snake_case__ : int=[1, 2, 1] , snake_case__ : Dict=[2, 2, 4] , snake_case__ : Dict=2 , snake_case__ : Tuple=2.0 , snake_case__ : Optional[int]=True , snake_case__ : Union[str, Any]=0.0 , snake_case__ : Any=0.0 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int="gelu" , snake_case__ : Optional[int]=False , snake_case__ : List[Any]=True , snake_case__ : List[str]=0.02 , snake_case__ : int=1e-5 , snake_case__ : List[str]=True , snake_case__ : Union[str, Any]=None , snake_case__ : List[Any]=True , snake_case__ : Optional[Any]=10 , snake_case__ : Optional[Any]=8 , snake_case__ : Any=["stage1", "stage2", "stage3"] , snake_case__ : Tuple=[1, 2, 3] , ) -> Union[str, Any]: '''simple docstring''' snake_case : Any = parent snake_case : Optional[int] = batch_size snake_case : Union[str, Any] = image_size snake_case : Dict = patch_size snake_case : Optional[Any] = num_channels snake_case : Union[str, Any] = embed_dim snake_case : int = depths snake_case : List[str] = num_heads snake_case : Union[str, Any] = window_size snake_case : Union[str, Any] = mlp_ratio snake_case : 
List[Any] = qkv_bias snake_case : List[Any] = hidden_dropout_prob snake_case : Union[str, Any] = attention_probs_dropout_prob snake_case : Union[str, Any] = drop_path_rate snake_case : int = hidden_act snake_case : Optional[int] = use_absolute_embeddings snake_case : int = patch_norm snake_case : Union[str, Any] = layer_norm_eps snake_case : Any = initializer_range snake_case : Optional[Any] = is_training snake_case : Tuple = scope snake_case : Optional[int] = use_labels snake_case : Optional[Any] = type_sequence_label_size snake_case : Union[str, Any] = encoder_stride snake_case : Any = out_features snake_case : Tuple = out_indices def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict: '''simple docstring''' snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case : int = None if self.use_labels: snake_case : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case : Dict = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE (self : List[str] ) -> int: '''simple docstring''' return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Tuple ) -> Optional[Any]: '''simple docstring''' 
snake_case : Union[str, Any] = MaskFormerSwinModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() snake_case : List[Any] = model(snake_case__ ) snake_case : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case : int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] ) -> str: '''simple docstring''' snake_case : Optional[int] = MaskFormerSwinBackbone(config=snake_case__ ) model.to(snake_case__ ) model.eval() snake_case : List[Any] = model(snake_case__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(snake_case__ ): snake_case : Tuple = ["stem"] snake_case : List[Any] = MaskFormerSwinBackbone(config=snake_case__ ) def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]: '''simple docstring''' snake_case : Union[str, Any] = self.prepare_config_and_inputs() snake_case , snake_case , snake_case : List[Any] = config_and_inputs snake_case : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase ( A_ ,A_ ,unittest.TestCase ): A__ : List[str] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) A__ : str = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} A__ : Optional[Any] = False A__ : List[Any] = False A__ : List[str] = False A__ : List[str] = False A__ : Union[str, Any] = False def _SCREAMING_SNAKE_CASE (self : 
Optional[int] ) -> List[str]: '''simple docstring''' snake_case : str = MaskFormerSwinModelTester(self ) snake_case : Optional[int] = ConfigTester(self , config_class=snake_case__ , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" ) ) def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[Any]: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE (self : Tuple ) -> List[Any]: '''simple docstring''' return def _SCREAMING_SNAKE_CASE (self : Dict ) -> str: '''simple docstring''' snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _SCREAMING_SNAKE_CASE (self : int ) -> Dict: '''simple docstring''' snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*snake_case__ ) @unittest.skip("Swin does not use inputs_embeds" ) def _SCREAMING_SNAKE_CASE (self : int ) -> Any: '''simple docstring''' pass @unittest.skip("Swin does not support feedforward chunking" ) def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Dict: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[str]: '''simple docstring''' snake_case , snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case : int = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() 
, (nn.Module) ) snake_case : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict: '''simple docstring''' snake_case , snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case : str = model_class(snake_case__ ) snake_case : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case : Optional[Any] = [*signature.parameters.keys()] snake_case : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" ) def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> str: '''simple docstring''' pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" ) def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Any: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Tuple ) -> Optional[int]: '''simple docstring''' snake_case : Tuple = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): snake_case : Any = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) snake_case : int = outputs.hidden_states snake_case : Union[str, Any] = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(snake_case__ ) , snake_case__ ) # Swin has a different seq_length snake_case : Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, 
self.model_tester.embed_dim] , ) def _SCREAMING_SNAKE_CASE (self : Dict ) -> Union[str, Any]: '''simple docstring''' snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case : int = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case : int = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case : Dict = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE (self : int ) -> Any: '''simple docstring''' snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case : Any = 3 snake_case : List[str] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case : str = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case : Optional[Any] = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , (padded_height, padded_width) ) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" ) def _SCREAMING_SNAKE_CASE 
(self : Optional[int] ) -> str: '''simple docstring''' pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def _SCREAMING_SNAKE_CASE (self : str ) -> int: '''simple docstring''' pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def _SCREAMING_SNAKE_CASE (self : int ) -> str: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : Any ) -> Any: '''simple docstring''' snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(snake_case__ : Union[str, Any] ): snake_case : Any = 0 return t def check_equivalence(snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[int]={} ): with torch.no_grad(): snake_case : Optional[Any] = model(**snake_case__ , return_dict=snake_case__ , **snake_case__ ) snake_case : Tuple = model(**snake_case__ , return_dict=snake_case__ , **snake_case__ ).to_tuple() def recursive_check(snake_case__ : List[str] , snake_case__ : Optional[Any] ): if isinstance(snake_case__ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(snake_case__ , snake_case__ ): recursive_check(snake_case__ , snake_case__ ) elif isinstance(snake_case__ , snake_case__ ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(snake_case__ , snake_case__ ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(snake_case__ ) , set_nan_tensor_to_zero(snake_case__ ) , atol=1e-5 ) , msg=( "Tuple and dict output are not equal. Difference:" f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" f""" {torch.isnan(snake_case__ ).any()} and `inf`: {torch.isinf(snake_case__ )}. 
Dict has""" f""" `nan`: {torch.isnan(snake_case__ ).any()} and `inf`: {torch.isinf(snake_case__ )}.""" ) , ) recursive_check(snake_case__ , snake_case__ ) for model_class in self.all_model_classes: snake_case : Optional[int] = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() snake_case : Union[str, Any] = self._prepare_for_class(snake_case__ , snake_case__ ) snake_case : Tuple = self._prepare_for_class(snake_case__ , snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ ) snake_case : Tuple = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) snake_case : Optional[Any] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ ) snake_case : Dict = self._prepare_for_class(snake_case__ , snake_case__ ) snake_case : List[Any] = self._prepare_for_class(snake_case__ , snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ , {"output_hidden_states": True} ) snake_case : Any = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) snake_case : List[str] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ , {"output_hidden_states": True} ) @require_torch class UpperCAmelCase ( unittest.TestCase ,A_ ): A__ : int = (MaskFormerSwinBackbone,) if is_torch_available() else () A__ : int = MaskFormerSwinConfig def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Any: '''simple docstring''' snake_case : Union[str, Any] = MaskFormerSwinModelTester(self ) def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[Any]: '''simple docstring''' snake_case , snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() snake_case : Optional[int] = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: snake_case : Optional[int] = 
backbone_class(snake_case__ ) backbone.to(snake_case__ ) backbone.eval() snake_case : Union[str, Any] = backbone(**snake_case__ ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , snake_case__ ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True snake_case : Optional[int] = backbone(**snake_case__ , output_hidden_states=snake_case__ ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) snake_case , snake_case , snake_case : Dict = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: snake_case : Optional[Any] = backbone(**snake_case__ , output_attentions=snake_case__ ) self.assertIsNotNone(outputs.attentions )
59
0
# Shared constants: checkpoint filenames, minimum version strings, and
# launcher/FSDP/DeepSpeed option lists.
# NOTE(review): the obfuscation pass collapsed every distinct constant name
# into `__lowercase`, so each assignment below rebinds the SAME module-level
# name and only the last value survives after import — the original distinct
# names must be restored before any of these constants can be used.
import operator as op

__lowercase = '''scaler.pt'''
__lowercase = '''pytorch_model'''
__lowercase = '''random_states'''
__lowercase = '''optimizer'''
__lowercase = '''scheduler'''
__lowercase = '''pytorch_model.bin'''
__lowercase = '''pytorch_model.bin.index.json'''
__lowercase = '''model.safetensors'''
__lowercase = '''model.safetensors.index.json'''
__lowercase = '''1.10.2'''
__lowercase = '''py38'''
__lowercase = '''4.17.0'''
__lowercase = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
__lowercase = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
__lowercase = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
__lowercase = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
__lowercase = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
__lowercase = '''2.0.1'''
__lowercase = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
__lowercase = ['''default''', '''reduce-overhead''', '''max-autotune''']
# Maps comparison-operator strings to their `operator` module implementations.
__lowercase = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
__lowercase = [
    '''nnodes''',
    '''nproc_per_node''',
    '''rdzv_backend''',
    '''rdzv_endpoint''',
    '''rdzv_id''',
    '''rdzv_conf''',
    '''standalone''',
    '''max_restarts''',
    '''monitor_interval''',
    '''start_method''',
    '''role''',
    '''module''',
    '''m''',
    '''no_python''',
    '''run_path''',
    '''log_dir''',
    '''r''',
    '''redirects''',
    '''t''',
    '''tee''',
    '''node_rank''',
    '''master_addr''',
    '''master_port''',
]
__lowercase = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
__lowercase = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
43
# Builders for the per-protein atom14 <-> atom37 index and mask tensors.
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def UpperCamelCase ( __lowerCamelCase : Dict[str, torch.Tensor] ):
    # Build, per residue type, lookup tables between the dense 14-atom and the
    # full 37-atom representations, then gather them per residue of this protein.
    # NOTE(review): the obfuscation pass collapsed many distinct locals into
    # `snake_case`, yet later statements still reference the ORIGINAL names
    # (`restype_atomaa_to_atomaa_list`, `atom_names`, `atom_name_to_idxaa`,
    # `restype_atomaa_mask_list`, `protein`, `restype_atomaa_to_atomaa`,
    # `protein_aatype`, ...), which are unbound here — this function raises
    # NameError as written; restore the distinct names to make it runnable.
    # NOTE(review): digits were also mangled out of attribute names — e.g.
    # `torch.intaa` / `torch.floataa` are presumably `torch.int32` /
    # `torch.float32`; verify against the upstream file.
    snake_case : List[str] = []
    snake_case : Optional[int] = []
    snake_case : Any = []
    for rt in rc.restypes:
        snake_case : List[Any] = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        snake_case : str = {name: i for i, name in enumerate(__lowerCamelCase )}
        restype_atomaa_to_atomaa_list.append(
            [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
        restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
    # Add dummy mapping for restype 'UNK'
    restype_atomaa_to_atomaa_list.append([0] * 14 )
    restype_atomaa_to_atomaa_list.append([0] * 37 )
    restype_atomaa_mask_list.append([0.0] * 14 )
    snake_case : Optional[Any] = torch.tensor(
        __lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , )
    snake_case : List[Any] = torch.tensor(
        __lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , )
    snake_case : int = torch.tensor(
        __lowerCamelCase , dtype=torch.floataa , device=protein["aatype"].device , )
    snake_case : int = protein["aatype"].to(torch.long )
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    snake_case : List[Any] = restype_atomaa_to_atomaa[protein_aatype]
    snake_case : str = restype_atomaa_mask[protein_aatype]
    snake_case : str = residx_atomaa_mask
    snake_case : Any = residx_atomaa_to_atomaa.long()
    # create the gather indices for mapping back
    snake_case : List[str] = restype_atomaa_to_atomaa[protein_aatype]
    snake_case : List[Any] = residx_atomaa_to_atomaa.long()
    # create the corresponding mask
    snake_case : Union[str, Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["aatype"].device )
    for restype, restype_letter in enumerate(rc.restypes ):
        snake_case : Optional[int] = rc.restype_atoa[restype_letter]
        snake_case : Any = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            snake_case : List[Any] = rc.atom_order[atom_name]
            snake_case : Optional[Any] = 1
    snake_case : List[Any] = restype_atomaa_mask[protein_aatype]
    snake_case : int = residx_atomaa_mask
    return protein


def UpperCamelCase ( __lowerCamelCase : Dict[str, torch.Tensor] ):
    # Numpy-facing wrapper: convert ndarray leaves to tensors, build the masks,
    # and convert the resulting tensors back to numpy arrays.
    # NOTE(review): this definition SHADOWS the function above (both were
    # renamed to `UpperCamelCase`), it calls the original helper name
    # `make_atomaa_masks` which is undefined in this module as written, it
    # references `batch` instead of its own parameter, and it returns the
    # unbound name `out` — restore the original identifiers.
    snake_case : Dict = tree_map(lambda __lowerCamelCase : torch.tensor(__lowerCamelCase , device=batch["aatype"].device ) , __lowerCamelCase , np.ndarray )
    snake_case : List[str] = tensor_tree_map(lambda __lowerCamelCase : np.array(__lowerCamelCase ) , make_atomaa_masks(__lowerCamelCase ) )
    return out
59
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowercase : str = { "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"], "tokenization_m2m_100": ["M2M100Tokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : str = [ "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST", "M2M100ForConditionalGeneration", "M2M100Model", "M2M100PreTrainedModel", ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys _lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
238
# Fast (Rust-backed) WordPiece tokenizer for LXMERT.
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


# NOTE(review): the four module constants below all rebind the SAME
# `__lowerCamelCase` name (obfuscation artifact); the class attributes that
# reference the original names (VOCAB_FILES_NAMES, ...) are therefore unbound.
__lowerCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCamelCase = {
    """vocab_file""": {
        """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
    },
    """tokenizer_file""": {
        """unc-nlp/lxmert-base-uncased""": (
            """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
        ),
    },
}
__lowerCamelCase = {
    """unc-nlp/lxmert-base-uncased""": 5_12,
}
__lowerCamelCase = {
    """unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}


class UpperCAmelCase ( A_ ):
    """Fast LXMERT tokenizer backed by the HuggingFace *tokenizers* library.

    NOTE(review): the base class was obfuscated to ``A_`` (presumably
    ``PreTrainedTokenizerFast`` given the import above — verify). All class
    attributes were renamed to ``A__`` (each assignment rebinds the same
    attribute), all three methods share the name ``_SCREAMING_SNAKE_CASE``
    (only the last survives), and every method signature collapses distinct
    parameter names into ``snake_case__`` — duplicate arguments that make
    this file a SyntaxError as written. Restore the original identifiers.
    """

    A__ : Any = VOCAB_FILES_NAMES
    A__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    A__ : Tuple = PRETRAINED_INIT_CONFIGURATION
    A__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A__ : List[Any] = LxmertTokenizer

    def __init__(self : Dict , snake_case__ : Tuple=None , snake_case__ : Optional[Any]=None , snake_case__ : Optional[Any]=True , snake_case__ : Tuple="[UNK]" , snake_case__ : Optional[Any]="[SEP]" , snake_case__ : Optional[Any]="[PAD]" , snake_case__ : List[Any]="[CLS]" , snake_case__ : Tuple="[MASK]" , snake_case__ : Dict=True , snake_case__ : Union[str, Any]=None , **snake_case__ : Dict , ) -> Optional[int]:
        """Initialise the backend tokenizer and re-sync its normalizer options."""
        super().__init__(
            snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , **snake_case__ , )
        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the requested lowercase/strip-accents/chinese flags.
        snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , snake_case__ ) != do_lower_case
            or normalizer_state.get("strip_accents" , snake_case__ ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , snake_case__ ) != tokenize_chinese_chars
        ):
            snake_case : Union[str, Any] = getattr(snake_case__ , normalizer_state.pop("type" ) )
            snake_case : str = do_lower_case
            snake_case : List[Any] = strip_accents
            snake_case : Optional[int] = tokenize_chinese_chars
            snake_case : int = normalizer_class(**snake_case__ )
        snake_case : Optional[Any] = do_lower_case

    def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=None ) -> Any:
        """Build `[CLS] A [SEP]` (plus `B [SEP]` for pairs) from token id lists."""
        snake_case : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
        """Return token-type ids: 0 for the first sequence, 1 for the second."""
        snake_case : Optional[Any] = [self.sep_token_id]
        snake_case : Optional[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
        """Save the backend vocabulary files and return their paths."""
        snake_case : List[Any] = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
        return tuple(snake_case__ )
59
0
# Lazy-import scaffolding for the MT5 model family (PyTorch / TF / Flax).
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# MT5 reuses the T5 tokenizers; fall back to dummy objects when the optional
# backends are missing.
if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

lowerCamelCase__ : Dict = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

# NOTE(review): every module-level binding below rebinds the SAME
# `lowerCamelCase__` name (obfuscation artifact); in particular the
# `_import_structure`, `MTaTokenizer` and `MTaTokenizerFast` names referenced
# at the bottom are unbound as written.
lowerCamelCase__ : Union[str, Any] = TaTokenizerFast

lowerCamelCase__ : int = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ : Tuple = [
        'MT5EncoderModel',
        'MT5ForConditionalGeneration',
        'MT5ForQuestionAnswering',
        'MT5Model',
        'MT5PreTrainedModel',
        'MT5Stack',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ : Optional[Any] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ : str = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']

if TYPE_CHECKING:
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
    import sys

    # Replace this module with a lazy proxy; tokenizers are exposed eagerly.
    lowerCamelCase__ : Optional[int] = _LazyModule(
        __name__,
        globals()['__file__'],
        _import_structure,
        extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
        module_spec=__spec__,
    )
225
# Unit tests for `DDIMParallelScheduler`.
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class UpperCAmelCase ( A_ ):
    """Scheduler tests for DDIMParallelScheduler.

    NOTE(review): the base class was obfuscated to ``A_`` (presumably
    ``SchedulerCommonTest``, the only import left unused otherwise — verify).
    Every method shares the name ``_SCREAMING_SNAKE_CASE`` (only the last
    definition survives at class-creation time) and distinct locals were
    collapsed into ``snake_case``, so later references to the original names
    (``config``, ``scheduler_class``, ``scheduler``, ``model``, ``sample``,
    ``result_sum``, ``result_mean``, ...) are unbound — restore the original
    identifiers before running.
    """

    A__ : Dict = (DDIMParallelScheduler,)
    A__ : Tuple = (("eta", 0.0), ("num_inference_steps", 50))

    def _SCREAMING_SNAKE_CASE (self : Tuple , **snake_case__ : Optional[int] ) -> Optional[Any]:
        """Return the default scheduler config, updated with any overrides."""
        snake_case : Any = {
            "num_train_timesteps": 10_00,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**snake_case__ )
        return config

    def _SCREAMING_SNAKE_CASE (self : Dict , **snake_case__ : Optional[int] ) -> Any:
        """Run a full denoising loop with the dummy model; return the final sample."""
        snake_case : List[Any] = self.scheduler_classes[0]
        snake_case : Any = self.get_scheduler_config(**snake_case__ )
        snake_case : Any = scheduler_class(**snake_case__ )
        # NOTE(review): two distinct locals (num_inference_steps=10, eta=0.0)
        # collapsed into one name here.
        snake_case , snake_case : Union[str, Any] = 10, 0.0
        snake_case : List[Any] = self.dummy_model()
        snake_case : Any = self.dummy_sample_deter
        scheduler.set_timesteps(snake_case__ )
        for t in scheduler.timesteps:
            snake_case : Optional[int] = model(snake_case__ , snake_case__ )
            snake_case : List[str] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , snake_case__ ).prev_sample
        return sample

    def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> str:
        """Config sweep over num_train_timesteps."""
        for timesteps in [1_00, 5_00, 10_00]:
            self.check_over_configs(num_train_timesteps=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : str ) -> int:
        """Config sweep over steps_offset, plus an explicit timestep check."""
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=snake_case__ )
        snake_case : Optional[int] = self.scheduler_classes[0]
        snake_case : Optional[int] = self.get_scheduler_config(steps_offset=1 )
        snake_case : Union[str, Any] = scheduler_class(**snake_case__ )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )

    def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
        """Config sweep over (beta_start, beta_end) pairs."""
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]:
        """Config sweep over beta_schedule."""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : str ) -> Dict:
        """Config sweep over prediction_type."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[str]:
        """Config sweep over clip_sample."""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
        """Config sweep over timestep_spacing."""
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[Any]:
        """Config sweep over rescale_betas_zero_snr."""
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[Any]:
        """Config sweep over thresholding / sample_max_value combinations."""
        self.check_over_configs(thresholding=snake_case__ )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , )

    def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
        """Forward sweep over individual time steps."""
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Any:
        """Forward sweep over (time_step, num_inference_steps) pairs."""
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
            self.check_over_forward(time_step=snake_case__ , num_inference_steps=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
        """Forward sweep over (time_step, eta) pairs."""
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=snake_case__ , eta=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[int]:
        """Spot-check `_get_variance` against precomputed reference values."""
        snake_case : Dict = self.scheduler_classes[0]
        snake_case : Tuple = self.get_scheduler_config()
        snake_case : Dict = scheduler_class(**snake_case__ )
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.14771 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.32460 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.00979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5

    def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
        """Check `batch_step_no_noise` on a stacked batch of three samples."""
        snake_case : Union[str, Any] = self.scheduler_classes[0]
        snake_case : List[Any] = self.get_scheduler_config()
        snake_case : int = scheduler_class(**snake_case__ )
        snake_case , snake_case : Any = 10, 0.0
        scheduler.set_timesteps(snake_case__ )
        snake_case : Optional[Any] = self.dummy_model()
        snake_case : str = self.dummy_sample_deter
        snake_case : Dict = self.dummy_sample_deter + 0.1
        snake_case : Dict = self.dummy_sample_deter - 0.1
        snake_case : Optional[Any] = samplea.shape[0]
        snake_case : str = torch.stack([samplea, samplea, samplea] , dim=0 )
        snake_case : Tuple = torch.arange(snake_case__ )[0:3, None].repeat(1 , snake_case__ )
        snake_case : Tuple = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        snake_case : List[str] = scheduler.batch_step_no_noise(snake_case__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , snake_case__ )
        snake_case : Dict = torch.sum(torch.abs(snake_case__ ) )
        snake_case : List[Any] = torch.mean(torch.abs(snake_case__ ) )
        assert abs(result_sum.item() - 1147.7904 ) < 1e-2
        assert abs(result_mean.item() - 0.4982 ) < 1e-3

    def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[Any]:
        """Full loop with default config: compare sum/mean to reference values."""
        snake_case : List[Any] = self.full_loop()
        snake_case : Optional[Any] = torch.sum(torch.abs(snake_case__ ) )
        snake_case : List[Any] = torch.mean(torch.abs(snake_case__ ) )
        assert abs(result_sum.item() - 172.0067 ) < 1e-2
        assert abs(result_mean.item() - 0.223967 ) < 1e-3

    def _SCREAMING_SNAKE_CASE (self : str ) -> Union[str, Any]:
        """Full loop with v-prediction: compare sum/mean to reference values."""
        snake_case : Dict = self.full_loop(prediction_type="v_prediction" )
        snake_case : int = torch.sum(torch.abs(snake_case__ ) )
        snake_case : Optional[int] = torch.mean(torch.abs(snake_case__ ) )
        assert abs(result_sum.item() - 52.5302 ) < 1e-2
        assert abs(result_mean.item() - 0.0684 ) < 1e-3

    def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
        """Full loop with set_alpha_to_one and custom beta_start (variant 1)."""
        snake_case : Dict = self.full_loop(set_alpha_to_one=snake_case__ , beta_start=0.01 )
        snake_case : str = torch.sum(torch.abs(snake_case__ ) )
        snake_case : Optional[Any] = torch.mean(torch.abs(snake_case__ ) )
        assert abs(result_sum.item() - 149.8295 ) < 1e-2
        assert abs(result_mean.item() - 0.1951 ) < 1e-3

    def _SCREAMING_SNAKE_CASE (self : int ) -> Optional[Any]:
        """Full loop with set_alpha_to_one and custom beta_start (variant 2)."""
        snake_case : int = self.full_loop(set_alpha_to_one=snake_case__ , beta_start=0.01 )
        snake_case : Tuple = torch.sum(torch.abs(snake_case__ ) )
        snake_case : List[Any] = torch.mean(torch.abs(snake_case__ ) )
        assert abs(result_sum.item() - 149.0784 ) < 1e-2
        assert abs(result_mean.item() - 0.1941 ) < 1e-3
59
0
"""simple docstring""" import fire from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer def a_ ( lowerCamelCase , lowerCamelCase , **lowerCamelCase ): UpperCAmelCase__ = AutoConfig.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) UpperCAmelCase__ = AutoModelForSeqaSeqLM.from_config(__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) AutoTokenizer.from_pretrained(__lowerCamelCase ).save_pretrained(__lowerCamelCase ) return model if __name__ == "__main__": fire.Fire(save_randomly_initialized_version)
98
def encrypt(input_string: str, key: int) -> str:
    """Encrypt ``input_string`` with the rail-fence (zigzag) cipher on ``key`` rails.

    Raises:
        ValueError: If ``key`` is zero or negative.
    """
    # Bug fix: the obfuscated version reused one name for both parameters
    # (a SyntaxError) and named all three functions identically; restore the
    # conventional names while keeping the algorithm unchanged.
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    return "".join(grid)


def decrypt(input_string: str, key: int) -> str:
    """Invert :func:`encrypt` for the same ``key``.

    Raises:
        ValueError: If ``key`` is zero or negative.
    """
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Return every candidate decryption, keyed by the key that produced it."""
    # Bug fix: the obfuscated body called `decrypt`, which was undefined
    # because all three functions had been renamed to `UpperCamelCase`.
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


# Backward-compatible alias: in the obfuscated module all three defs shared
# the name `UpperCamelCase`, so the last definition (bruteforce) would win.
UpperCamelCase = bruteforce


if __name__ == "__main__":
    import doctest

    doctest.testmod()
59
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING import torch from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class lowerCAmelCase__ ( A_ ): '''simple docstring''' __UpperCamelCase = "dandelin/vilt-b32-finetuned-vqa" __UpperCamelCase = ( "This is a tool that answers a question about an image. It takes an input named `image` which should be the " "image containing the information, as well as a `question` which should be the question in English. It " "returns a text that is the answer to the question." 
) __UpperCamelCase = "image_qa" __UpperCamelCase = AutoProcessor __UpperCamelCase = AutoModelForVisualQuestionAnswering __UpperCamelCase = ["image", "text"] __UpperCamelCase = ["text"] def __init__( self : int , *lowercase_ : int , **lowercase_ : str): '''simple docstring''' requires_backends(self , ['''vision''']) super().__init__(*snake_case__ , **snake_case__) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : "Image" , lowercase_ : str): '''simple docstring''' return self.pre_processor(snake_case__ , snake_case__ , return_tensors='''pt''') def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : int): '''simple docstring''' with torch.no_grad(): return self.model(**snake_case__).logits def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.argmax(-1).item() return self.model.config.idalabel[idx]
91
# Lazy-import scaffolding for the MT5 model family (PyTorch / TF / Flax).
# NOTE(review): this chunk contains an earlier, near-identical copy of this
# module; both suffer the same obfuscation artifact described below.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# MT5 reuses the T5 tokenizers; fall back to dummy objects when the optional
# backends are missing.
if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

__lowerCamelCase = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

# NOTE(review): every module-level binding rebinds the SAME `__lowerCamelCase`
# name (obfuscation artifact); `_import_structure`, `MTaTokenizer` and
# `MTaTokenizerFast` referenced at the bottom are unbound as written.
__lowerCamelCase = TaTokenizerFast

__lowerCamelCase = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase = [
        """MT5EncoderModel""",
        """MT5ForConditionalGeneration""",
        """MT5ForQuestionAnswering""",
        """MT5Model""",
        """MT5PreTrainedModel""",
        """MT5Stack""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]

if TYPE_CHECKING:
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
    import sys

    # Replace this module with a lazy proxy; tokenizers are exposed eagerly.
    __lowerCamelCase = _LazyModule(
        __name__,
        globals()["""__file__"""],
        _import_structure,
        extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
        module_spec=__spec__,
    )
59
0
"""simple docstring""" def __lowerCAmelCase (_UpperCamelCase ): return "".join(chr(ord(__lowerCamelCase ) - 32 ) if 'a' <= char <= 'z' else char for char in word ) if __name__ == "__main__": from doctest import testmod testmod()
86
# Thin wrapper around an onnxruntime InferenceSession with save/load helpers
# modeled after the HuggingFace `from_pretrained` / `save_pretrained` API.
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


__lowerCamelCase = logging.get_logger(__name__)

# Maps ONNX tensor type strings to numpy dtypes.
# NOTE(review): obfuscation artifacts — this rebinds the same
# `__lowerCamelCase` name as the logger above, and digits were mangled out of
# the numpy attribute names (`np.inta`, `np.floataa`, ... do not exist;
# presumably `np.int8`, `np.float32`, etc. — verify against upstream).
__lowerCamelCase = {
    """tensor(bool)""": np.bool_,
    """tensor(int8)""": np.inta,
    """tensor(uint8)""": np.uinta,
    """tensor(int16)""": np.intaa,
    """tensor(uint16)""": np.uintaa,
    """tensor(int32)""": np.intaa,
    """tensor(uint32)""": np.uintaa,
    """tensor(int64)""": np.intaa,
    """tensor(uint64)""": np.uintaa,
    """tensor(float16)""": np.floataa,
    """tensor(float)""": np.floataa,
    """tensor(double)""": np.floataa,
}


class UpperCAmelCase :
    """ONNX Runtime model wrapper (originally ``OnnxRuntimeModel``, as the
    internal ``OnnxRuntimeModel.load_model`` references show).

    NOTE(review): obfuscation artifacts throughout — all methods share the
    name ``_SCREAMING_SNAKE_CASE`` (only the last survives), signatures
    collapse distinct parameter names into ``snake_case__`` (duplicate
    arguments: a SyntaxError), and bodies reference the original local names
    (``model``, ``kwargs``, ``provider``, ``file_name``, ``logger``,
    ``save_directory``, ``src_path``, ``model_id``, ...) which are unbound.
    Restore the original identifiers before use.
    """

    def __init__(self : Optional[Any] , snake_case__ : Optional[Any]=None , **snake_case__ : Optional[Any] ) -> List[str]:
        """Store the session plus the directory/filename it was loaded from."""
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
        snake_case : Optional[Any] = model
        snake_case : Dict = kwargs.get("model_save_dir" , snake_case__ )
        snake_case : int = kwargs.get("latest_model_name" , snake_case__ )

    def __call__(self : Tuple , **snake_case__ : str ) -> List[str]:
        """Convert keyword inputs to numpy arrays and run the session."""
        snake_case : Union[str, Any] = {k: np.array(snake_case__ ) for k, v in kwargs.items()}
        return self.model.run(snake_case__ , snake_case__ )

    @staticmethod
    def _SCREAMING_SNAKE_CASE (snake_case__ : Union[str, Path] , snake_case__ : Optional[int]=None , snake_case__ : Optional[int]=None ) -> Any:
        """Create an `ort.InferenceSession`, defaulting to the CPU provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
            snake_case : Optional[int] = "CPUExecutionProvider"
        return ort.InferenceSession(snake_case__ , providers=[provider] , sess_options=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : Union[str, Path] , snake_case__ : Optional[str] = None , **snake_case__ : Any ) -> List[Any]:
        """Copy the model file (and external weights, if any) to a destination."""
        snake_case : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        snake_case : Any = self.model_save_dir.joinpath(self.latest_model_name )
        snake_case : str = Path(snake_case__ ).joinpath(snake_case__ )
        try:
            shutil.copyfile(snake_case__ , snake_case__ )
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        snake_case : List[str] = self.model_save_dir.joinpath(snake_case__ )
        if src_path.exists():
            snake_case : Tuple = Path(snake_case__ ).joinpath(snake_case__ )
            try:
                shutil.copyfile(snake_case__ , snake_case__ )
            except shutil.SameFileError:
                pass

    def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Union[str, os.PathLike] , **snake_case__ : Optional[int] , ) -> str:
        """Public save entry point: validate the directory, then delegate."""
        if os.path.isfile(snake_case__ ):
            logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
            return
        os.makedirs(snake_case__ , exist_ok=snake_case__ )
        # saving model weights/files
        self._save_pretrained(snake_case__ , **snake_case__ )

    @classmethod
    def _SCREAMING_SNAKE_CASE (cls : Tuple , snake_case__ : Union[str, Path] , snake_case__ : Optional[Union[bool, str, None]] = None , snake_case__ : Optional[Union[str, None]] = None , snake_case__ : bool = False , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , snake_case__ : Optional["ort.SessionOptions"] = None , **snake_case__ : Tuple , ) -> Tuple:
        """Load from a local directory, or download from the hub then load."""
        snake_case : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(snake_case__ ):
            snake_case : Any = OnnxRuntimeModel.load_model(
                os.path.join(snake_case__ , snake_case__ ) , provider=snake_case__ , sess_options=snake_case__ )
            snake_case : Union[str, Any] = Path(snake_case__ )
        # load model from hub
        else:
            # download model
            snake_case : Dict = hf_hub_download(
                repo_id=snake_case__ , filename=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , )
            snake_case : List[Any] = Path(snake_case__ ).parent
            snake_case : Union[str, Any] = Path(snake_case__ ).name
            snake_case : Dict = OnnxRuntimeModel.load_model(snake_case__ , provider=snake_case__ , sess_options=snake_case__ )
        return cls(model=snake_case__ , **snake_case__ )

    @classmethod
    def _SCREAMING_SNAKE_CASE (cls : Optional[Any] , snake_case__ : Union[str, Path] , snake_case__ : bool = True , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , **snake_case__ : Dict , ) -> Union[str, Any]:
        """Split an optional `model_id@revision` spec and delegate loading."""
        snake_case : Dict = None
        if len(str(snake_case__ ).split("@" ) ) == 2:
            snake_case , snake_case : int = model_id.split("@" )
        return cls._from_pretrained(
            model_id=snake_case__ , revision=snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , use_auth_token=snake_case__ , **snake_case__ , )
59
0
"""Tests for the MaskFormerSwin model and backbone.

MaskFormerSwin is the Swin variant used internally by MaskFormer; it is only
exercised as a backbone, which is why most attention/output tests are skipped.
"""

import collections
import inspect
import unittest
from typing import Dict, List, Tuple

from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MaskFormerSwinBackbone
    from transformers.models.maskformer import MaskFormerSwinModel


class MaskFormerSwinModelTester:
    """Builds small configs/inputs and runs shape checks for MaskFormerSwin.

    NOTE(review): the classes in the original chunk were all named ``__A`` while
    the code below instantiates ``MaskFormerSwinModelTester`` — restored the
    consistent names so the module actually runs.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny random batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            # fixed: the original passed the misspelled kwarg `path_norm`, which
            # the config silently swallowed into **kwargs instead of setting patch_norm
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # Swin halves the spatial resolution (4x fewer tokens) and doubles the
        # channel dim at every stage after the first.
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError on an out_features entry that is not a stage name
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # common properties are checked elsewhere for this config
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_to_base(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        # inputs that are not a multiple of the patch size get padded up
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_initialization(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            # NaN != NaN, so this masks exactly the NaN entries
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})


@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overriding as returned hidden states are tuples of tensors instead of a single tensor
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
1
"""Convert RegNet / SEER checkpoints (timm, classy_vision, vissl) to the
Transformers RegNet format and optionally push them to the Hub.

Review fixes applied to the scrambled original:
- all classes/functions had colliding placeholder names (`UpperCAmelCase`,
  `UpperCamelCase`) while call sites used the real names — restored the
  consistent names so the script runs;
- the classy_vision import listed `RegNetYaagf` twice — restored the three
  distinct model classes actually used (32gf/64gf/128gf);
- `RegNetParams(..., w_a=1744, w_a=620.83, ...)` had a duplicate keyword
  (a SyntaxError) — restored `w_0=1744, w_a=620.83`.
"""

import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple

import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs

from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    """Records the leaf modules of `module` in execution order via forward hooks."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # only keep leaves (or conv/batchnorm, which may wrap parametrized buffers)
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # remove the hooks once the forward pass has populated `traced`
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    """Copies weights from `src` to `dest` by zipping their traced leaf modules."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """Transfer the weights of `self.src` to `self.dest` by performing a forward
        pass using `x` as input. Under the hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


class FakeRegNetVisslWrapper(nn.Module):
    """Mimics what vissl expects from a trunk, without needing a vissl config file."""

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )


class NameToFromModelFuncMap(dict):
    """Dict with fallback logic: unknown names are resolved as pretrained timm models."""

    def convert_name_to_timm(self, x: str) -> str:
        # "regnet-x-002" -> "regnetx_002"
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            timm_name = self.convert_name_to_timm(x)
            # second tuple element is the (absent) vissl head state dict
            val = partial(lambda name: (timm.create_model(name, pretrained=True).eval(), None), timm_name)
        else:
            val = super().__getitem__(x)
        return val


class NameToOurModelFuncMap(dict):
    """Maps a checkpoint name to the matching Transformers RegNet class."""

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            # self-supervised SEER checkpoints have no classification head
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val


def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    """Copy head tensors from a vissl state dict into ours under the given key mapping."""
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict


def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    """Convert a single checkpoint, verify outputs match, and optionally push it."""
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one named checkpoint, or every known one when `model_name` is None."""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "regnet-x-002": ImageNetPreTrainedConfig(
            depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="x"
        ),
        "regnet-x-004": ImageNetPreTrainedConfig(
            depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="x"
        ),
        "regnet-x-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="x"
        ),
        "regnet-x-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="x"
        ),
        "regnet-x-016": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="x"
        ),
        "regnet-x-032": ImageNetPreTrainedConfig(
            depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type="x"
        ),
        "regnet-x-040": ImageNetPreTrainedConfig(
            depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type="x"
        ),
        "regnet-x-064": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type="x"
        ),
        "regnet-x-080": ImageNetPreTrainedConfig(
            depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type="x"
        ),
        "regnet-x-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type="x"
        ),
        "regnet-x-160": ImageNetPreTrainedConfig(
            depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type="x"
        ),
        "regnet-x-320": ImageNetPreTrainedConfig(
            depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type="x"
        ),
        # y variant
        "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8),
        "regnet-y-004": ImageNetPreTrainedConfig(
            depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8
        ),
        "regnet-y-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16
        ),
        "regnet-y-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16
        ),
        "regnet-y-016": ImageNetPreTrainedConfig(
            depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24
        ),
        "regnet-y-032": ImageNetPreTrainedConfig(
            depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24
        ),
        "regnet-y-040": ImageNetPreTrainedConfig(
            depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64
        ),
        "regnet-y-064": ImageNetPreTrainedConfig(
            depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72
        ),
        "regnet-y-080": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56
        ),
        "regnet-y-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112
        ),
        "regnet-y-160": ImageNetPreTrainedConfig(
            depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112
        ),
        "regnet-y-320": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
        ),
        # models created by SEER -> https://arxiv.org/abs/2202.08360
        "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        "regnet-y-1280-seer": RegNetConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
        ),
        "regnet-y-2560-seer": RegNetConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
        ),
        "regnet-y-10b-seer": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
        # finetuned on imagenet
        "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
        ),
        "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328
        ),
        "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
        ),
        "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
        ),
        "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
    }

    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
            " currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
59
0
from scipy.stats import spearmanr

import datasets


# Module-level doc strings consumed by `add_start_docstrings` and `MetricInfo`.
# Bug fix: these were all bound to the same throwaway name while the code below
# referenced `_DESCRIPTION` / `_KWARGS_DESCRIPTION` / `_CITATION` (NameError).
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets.
Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative
correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic
relationship.

Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are
normally distributed.

The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a
Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not
entirely reliable but are probably reasonable for datasets larger than 500 or so.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
        only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned
        if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {'spearmanr': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results['spearmanr'])
        -0.7
        >>> print(round(results['spearmanr_pvalue'], 2))
        0.19
"""

_CITATION = r"""\
@book{kokoska2000crc,
   title={CRC standard probability and statistics tables and formulae},
   author={Kokoska, Stephen and Zwillinger, Daniel},
   year={2000},
   publisher={Crc Press}
}
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
             Haberland, Matt and Reddy, Tyler and Cournapeau, David and
             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
             Kern, Robert and Larson, Eric and Carey, C J and
             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
             Harris, Charles R. and Archibald, Anne M. and
             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
             Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    """Spearman rank-order correlation metric backed by `scipy.stats.spearmanr`."""

    def _info(self):
        # Metadata describing the metric's inputs and reference documentation.
        # Bug fix: both methods were previously given the same (mangled) name,
        # so the `datasets.Metric` hooks `_info`/`_compute` never existed.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Compute the Spearman correlation (and optionally its p-value)."""
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
19
import warnings
from typing import Dict

import numpy as np

from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


def sigmoid(_outputs):
    """Element-wise logistic sigmoid of the raw logits."""
    # Bug fix: parameter was mangled while the body referenced `_outputs` (NameError).
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    """Numerically stable softmax over the last axis of the raw logits."""
    # Subtracting the per-row max avoids overflow in `np.exp`.
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    # Post-processing functions a text-classification pipeline can apply to logits.
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    """Sequence-classification pipeline: tokenize -> model forward -> score post-processing."""

    # Legacy knob; superseded by `top_k` (see `_sanitize_parameters`).
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Split caller kwargs into preprocess / forward / postprocess parameter dicts.
        # Bug fix: the four pipeline hooks all carried the same mangled name, so the
        # base `Pipeline` machinery could never find `_sanitize_parameters` et al.
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated,  if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair."
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # Pick a default scoring function from the model config when none was requested.
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""")

        if top_k == 1 and _legacy:
            # Bug fix: `idalabel` -> `id2label` (the actual config attribute name).
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            # Bug fix: the sort key lambda's parameter did not match the name used in its body.
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
59
0
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True iff *matrix* equals its own conjugate transpose.

    Bug fix: all three functions in this module shared one mangled name while
    the bodies called `is_hermitian`, `rayleigh_quotient` and `tests` (NameError).
    """
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* A v) / (v* v) of matrix *a* and column vector *v*."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    """Smoke tests: a complex and a real Hermitian matrix."""
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
15
from __future__ import annotations

# Type alias used by the annotations below.
# Bug fix: all definitions in this module carried mangled names while the code
# called `is_safe`, `find_empty_location`, `sudoku` and `print_solution`.
Matrix = list[list[int]]

# assigning initial values to the grid (0 means "empty cell")
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution (contains conflicting givens)
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit *n* may be placed at (row, column) without conflicts."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    # Check the 3x3 box containing (row, column).
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve *grid* in place by backtracking; return the grid or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # Undo the tentative placement before trying the next digit.
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    """Pretty-print a grid, one row per line."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
59
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : List[Any] = { 'configuration_blenderbot_small': [ 'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotSmallConfig', 'BlenderbotSmallOnnxConfig', ], 'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[str] = ['BlenderbotSmallTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = [ 'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotSmallForCausalLM', 'BlenderbotSmallForConditionalGeneration', 'BlenderbotSmallModel', 'BlenderbotSmallPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'TFBlenderbotSmallForConditionalGeneration', 'TFBlenderbotSmallModel', 'TFBlenderbotSmallPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = [ 'FlaxBlenderbotSmallForConditionalGeneration', 'FlaxBlenderbotSmallModel', 'FlaxBlenderbotSmallPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
115
import logging

import numpy as np
import pytest
from scipy.linalg import eigh

logging.basicConfig(level=logging.INFO, format="%(message)s")


def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D row array into a column vector of shape (n, 1)."""
    # Bug fix: every function in this module was defined under one mangled name
    # while the bodies called `column_reshape`, `covariance_within_classes`, etc.
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Return the within-class scatter matrix, normalized by the sample count."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:  # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:  # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Return the between-class scatter matrix, normalized by the sample count."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:  # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:  # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project *features* (rows = variables, columns = samples) onto `dimensions` principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project *features* onto `dimensions` linear discriminants (Fisher LDA)."""
    assert classes > dimensions

    # Check if features have been already loaded.
    # Bug fix: `features.any` (the bound method, always truthy) -> `features.any()`.
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    """LDA must reject dimensions > classes with an AssertionError."""
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
        assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    """PCA output must match the expected projection (otherwise AssertionError)."""
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
59
0
"""Fetch and print the current top BBC News headlines via newsapi.org."""
import requests

# Base endpoint; the caller's API key is appended to form the full URL.
# Bug fix: this constant and the function's parameter were mangled while the
# body referenced `_NEWS_API` and `bbc_news_api_key` (NameError), and `__main__`
# called the undefined name `fetch_bbc_news`.
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    """Print a numbered list of top BBC News article titles.

    Performs a network request; raises whatever `requests` raises on failure.
    """
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
237
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the deprecation-warning dedup set so each test re-emits the warning."""
    # Bug fix: the parameter was mangled while the body used `monkeypatch`;
    # pytest resolves fixtures by exact parameter name, so this never ran.
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    """Replace the huggingface_hub client with a stub exposing a fixed metric list."""

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Each metric entry point must emit the FutureWarning pointing to `evaluate`."""
    if "tmp_path" in args:
        # Substitute the placeholder with the real per-test tmp_path fixture value.
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    # Bug fix: the warning category argument was mangled; these deprecations are
    # emitted as FutureWarning.
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
59
0
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    """Return the transmitted intensity per Malus's law: I = I0 * cos^2(angle).

    Bug fix: both parameters previously shared one mangled name (a SyntaxError)
    while the body referenced `initial_intensity` and `angle`.

    Args:
        initial_intensity: Intensity of the incident polarized light (>= 0).
        angle: Angle between polarizer axes, in degrees, within [0, 360].

    Raises:
        ValueError: If the intensity is negative or the angle is out of range.
    """
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


# Backward-compatible alias for the previous (mangled) public name.
lowerCamelCase = malus_law


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
43
# Self-training loop for text classification: alternates (pseudo-)training,
# optional fine-tuning on the labeled data, evaluation, and pseudo-label
# generation, with early stopping across iterations.
#
# NOTE(review): identifiers throughout this file appear machine-mangled — values
# are bound to throwaway names (`snake_case`, `__lowerCamelCase`, `A__`, `A_`)
# but read back under their original names (`logger`, `args`, `dataset`,
# `data_dir_format`, `arguments_dict`, ...), and the three dataclasses all share
# one name. The code is preserved token-for-token below; only comments added.
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional

import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm

import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy


# Presumably the module logger and the checkpoint filename constant; later code
# reads `logger`, so these binding names look mangled — TODO confirm upstream.
__lowerCamelCase = logging.getLogger(__name__)
__lowerCamelCase = """pytorch_model.bin"""


@dataclasses.dataclass
class UpperCAmelCase:
    # Model arguments: pretrained model id/path and the download cache dir.
    # NOTE(review): both fields share the name `A__` and `A_` is undefined.
    A__ : str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    A__ : Optional[str] = dataclasses.field(
        default=A_,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class UpperCAmelCase:
    # Data arguments: train/infer/eval files plus task name and label list.
    A__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    A__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    A__ : Optional[str] = dataclasses.field(
        default=A_, metadata={"help": "A csv or a json file containing the validation data."}
    )
    A__ : Optional[str] = dataclasses.field(
        default=A_,
        metadata={"help": "The name of the task to train on."},
    )
    A__ : Optional[List[str]] = dataclasses.field(
        default=A_, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class UpperCAmelCase:
    # Training arguments: output dir, metric, eval strategy, early stopping and
    # pseudo-label filtering knobs.
    A__ : str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    A__ : Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    A__ : Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
        },
    )
    A__ : Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    A__ : Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    A__ : Optional[bool] = dataclasses.field(
        default=A_,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    A__ : Optional[bool] = dataclasses.field(
        default=A_,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    A__ : Optional[bool] = dataclasses.field(
        default=A_,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    A__ : Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    # NOTE(review): presumably max_selftrain_iterations; the help text is a
    # duplicate of the early-stopping field's — mangled, preserved as found.
    A__ : Optional[int] = dataclasses.field(
        default=1_00,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    A__ : Optional[int] = dataclasses.field(
        default=A_,
        metadata={"help": "Random seed for initialization."},
    )


# Builds the next iteration's pseudo-labeled training set from inference output:
# optionally filters by confidence and/or keeps the top eval_result fraction,
# renames "prediction" -> "label", shuffles, and writes train_pseudo.<ext>.
# NOTE(review): all six parameters share the name `__lowerCamelCase` (a
# SyntaxError as written); originals were presumably
# (args, infer_input, infer_output, eval_result, id2label, next_data_dir).
def UpperCamelCase(
    __lowerCamelCase: int,
    __lowerCamelCase: Tuple,
    __lowerCamelCase: Union[str, Any],
    __lowerCamelCase: Tuple,
    __lowerCamelCase: Dict,
    __lowerCamelCase: Optional[int],
):
    # Pair each inference input row with its predicted label/probability.
    snake_case : Tuple = datasets.concatenate_datasets([infer_input, infer_output], axis=1)
    if args.do_filter_by_confidence:
        snake_case : Optional[int] = dataset.filter(
            lambda __lowerCamelCase: example["probability"] > args.confidence_threshold
        )
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        # Keep only the eval_result fraction of highest-confidence rows.
        snake_case : int = int(eval_result * len(__lowerCamelCase))
        print(__lowerCamelCase)
        snake_case : List[str] = dataset.sort("probability", reverse=__lowerCamelCase)
        snake_case : Tuple = dataset.select(range(__lowerCamelCase))
    snake_case : List[Any] = dataset.remove_columns(["label", "probability"])
    snake_case : Any = dataset.rename_column("prediction", "label")
    # Map class indices back to label strings for the next training round.
    snake_case : str = dataset.map(lambda __lowerCamelCase: {"label": idalabel[example["label"]]})
    snake_case : List[str] = dataset.shuffle(seed=args.seed)
    snake_case : int = os.path.join(__lowerCamelCase, f"""train_pseudo.{args.data_file_extension}""")
    if args.data_file_extension == "csv":
        dataset.to_csv(__lowerCamelCase, index=__lowerCamelCase)
    else:
        dataset.to_json(__lowerCamelCase)


# Main self-training entry point. Presumed original signature:
# selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs).
def UpperCamelCase(
    __lowerCamelCase: Optional[int],
    __lowerCamelCase: str,
    __lowerCamelCase: Optional[int],
    __lowerCamelCase: Tuple,
    **__lowerCamelCase: List[Any],
):
    snake_case : int = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    # Merge model/data/training dataclass defaults plus caller kwargs into one
    # flat argparse.Namespace.
    snake_case : Dict = STModelArguments(model_name_or_path=__lowerCamelCase)
    snake_case : Tuple = STDataArguments(train_file=__lowerCamelCase, infer_file=__lowerCamelCase)
    snake_case : str = STTrainingArguments(output_dir=__lowerCamelCase)
    snake_case : int = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(__lowerCamelCase).items():
            setattr(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase)

    for key, value in kwargs.items():
        if hasattr(__lowerCamelCase, __lowerCamelCase):
            setattr(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase)

    # Sanity checks
    snake_case : List[str] = {}
    snake_case : Optional[int] = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    snake_case : str = args.train_file
    snake_case : Tuple = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        snake_case : Tuple = args.eval_file

    # All data files must share one extension, either csv or json.
    for key in data_files:
        snake_case : List[Any] = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
        if args.data_file_extension is None:
            snake_case : Union[str, Any] = extension
        else:
            assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    # Partially-applied format: data_dir_format(i) -> ".../self-train_iter-i".
    snake_case : List[Any] = f"""{args.output_dir}/self-train_iter-{{}}""".format
    snake_case : Optional[int] = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=__lowerCamelCase)
        os.makedirs(__lowerCamelCase, exist_ok=__lowerCamelCase)
    accelerator.wait_for_everyone()

    snake_case : Dict = None
    snake_case : Union[str, Any] = None
    snake_case : Tuple = 0
    snake_case : List[Any] = False

    # Show the progress bar
    snake_case : List[Any] = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        snake_case : str = data_dir_format(__lowerCamelCase)
        assert os.path.exists(__lowerCamelCase)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        snake_case : Dict = os.path.join(__lowerCamelCase, "stage-1")
        snake_case : Optional[Any] = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(__lowerCamelCase, __lowerCamelCase):
                arguments_dict.update({key: value})

        snake_case : int = os.path.join(__lowerCamelCase, "best-checkpoint", __lowerCamelCase)
        if os.path.exists(__lowerCamelCase):
            # Resume support: skip a stage whose checkpoint already exists.
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                __lowerCamelCase,
                __lowerCamelCase,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", __lowerCamelCase)
            finetune(**__lowerCamelCase)
            accelerator.wait_for_everyone()
            assert os.path.exists(__lowerCamelCase)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", __lowerCamelCase)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            snake_case : str = os.path.join(__lowerCamelCase, "best-checkpoint")
            snake_case : Dict = os.path.join(__lowerCamelCase, "stage-2")
            # Update arguments_dict
            snake_case : List[str] = model_path
            snake_case : Optional[Any] = data_files["train"]
            snake_case : Optional[Any] = current_output_dir

            snake_case : Union[str, Any] = os.path.join(__lowerCamelCase, "best-checkpoint", __lowerCamelCase)
            if os.path.exists(__lowerCamelCase):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    __lowerCamelCase,
                    __lowerCamelCase,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", __lowerCamelCase)
                finetune(**__lowerCamelCase)
                accelerator.wait_for_everyone()
                assert os.path.exists(__lowerCamelCase)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", __lowerCamelCase)

        snake_case : int = iteration
        snake_case : Tuple = data_dir_format(iteration + 1)

        # Read back the best checkpoint's label map and evaluation results.
        snake_case : Tuple = AutoConfig.from_pretrained(os.path.join(__lowerCamelCase, "best-checkpoint"))
        snake_case : Optional[int] = config.idalabel
        snake_case : List[Any] = os.path.join(__lowerCamelCase, "eval_results_best-checkpoint.json")
        snake_case : Union[str, Any] = os.path.join(__lowerCamelCase, "test_results_best-checkpoint.json")
        assert os.path.exists(__lowerCamelCase)

        with open(__lowerCamelCase, "r") as f:
            snake_case : Dict = float(json.load(__lowerCamelCase)[args.eval_metric])
        snake_case : Optional[int] = os.path.join(__lowerCamelCase, "infer_output_best-checkpoint.csv")
        assert os.path.exists(__lowerCamelCase)

        # Loading the dataset from local csv or json files.
        snake_case : Optional[Any] = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        snake_case : Dict = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(__lowerCamelCase, exist_ok=__lowerCamelCase)
            shutil.copy(__lowerCamelCase, os.path.join(__lowerCamelCase, f"""eval_results_iter-{iteration}.json"""))
            if os.path.exists(__lowerCamelCase):
                shutil.copy(__lowerCamelCase, os.path.join(__lowerCamelCase, f"""test_results_iter-{iteration}.json"""))
            create_pseudo_labeled_data(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase)
        accelerator.wait_for_everyone()

        snake_case : str = os.path.join(__lowerCamelCase, f"""train_pseudo.{args.data_file_extension}""")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            snake_case : List[Any] = eval_result

            if best_iteration is None:
                snake_case : List[Any] = new_iteration
                snake_case : int = new_eval_result
            else:
                # Early stopping: improvement beyond the threshold resets the
                # patience counter; otherwise the counter accumulates.
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    snake_case : int = new_iteration
                    snake_case : Union[str, Any] = new_eval_result
                    snake_case : str = 0
                else:
                    if new_eval_result == best_eval_result:
                        snake_case : Any = new_iteration
                        snake_case : Union[str, Any] = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    snake_case : Tuple = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", __lowerCamelCase)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, __lowerCamelCase)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(__lowerCamelCase, f"""eval_results_iter-{iteration}.json"""),
                os.path.join(__lowerCamelCase, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, __lowerCamelCase)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(__lowerCamelCase, f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json"""),
                os.path.join(__lowerCamelCase, "eval_results_best-iteration.json"),
            )
59
0
"""Introsort: quicksort with median-of-3 pivots, falling back to heapsort when
recursion gets too deep and to insertion sort on small ranges."""
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort array[start:end] in place with insertion sort and return the array.

    An `end` of 0 (the default) means "to the end of the array".
    """
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger elements right until the insertion point is found.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """Sift array[index] down so the subtree rooted at `index` is a max-heap."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    """Sort the whole array in place with heapsort and return it."""
    n = len(array)
    # Build the max-heap bottom-up.
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    # Repeatedly move the max to the end and restore the heap property.
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int):
    """Return the median value of the three sampled elements (pivot choice)."""
    if (array[first_index] > array[middle_index]) != (array[first_index] > array[last_index]):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (array[middle_index] > array[last_index]):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot) -> int:
    """Hoare-style partition of array[low:high] around `pivot`; returns the split index."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """Sort `array` in place with introsort and return it."""
    if len(array) == 0:
        return array
    # Depth limit 2*ceil(log2(n)) before switching to heapsort.
    max_depth = 2 * math.ceil(math.loga(len(array)) if False else math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    """Recursive introsort worker over array[start:end]."""
    while end - start > size_threshold:
        if max_depth == 0:
            # Quicksort degenerated; heapsort guarantees O(n log n).
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    # Small ranges are fastest with insertion sort.
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
238
"""Lazy-import init for the XGLM model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Build the lazy-import map; each optional submodule is registered only when its
# backend (sentencepiece / tokenizers / torch / flax / tf) is installed.
# (The original rebound one throwaway name for every list and then passed an
# undefined `_import_structure` to _LazyModule; keyed assignments restore it.)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static analyzers see the real imports; runtime stays lazy via _LazyModule.
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
59
0
# Slow integration test: runs the Flax Stable Diffusion 2 inpainting pipeline
# end-to-end on every JAX device and checks one 3x3 output slice against
# reference values.
#
# NOTE(review): local names look machine-mangled — every binding uses
# `SCREAMING_SNAKE_CASE_` (each shadowing the last) and call sites reference
# `snake_case__` and the original names (`init_image`, `pipeline`, `output`, ...).
# Code preserved token-for-token; only comments added.
import gc
import unittest

from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class lowerCamelCase_(unittest.TestCase):
    '''simple docstring'''

    # Release accumulated objects between tests.
    # NOTE(review): both methods share the name `lowerCAmelCase_`, so the second
    # definition shadows this one — presumably tearDown and a test method.
    def lowerCAmelCase_(self: Optional[int]):
        super().tearDown()
        gc.collect()

    # End-to-end inpainting run with fixed seed and reference output slice.
    def lowerCAmelCase_(self: Any):
        # Reference init/mask images hosted on the Hub.
        SCREAMING_SNAKE_CASE_ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png'
        )
        SCREAMING_SNAKE_CASE_ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png'
        )
        SCREAMING_SNAKE_CASE_ = "xvjiarui/stable-diffusion-2-inpainting"
        SCREAMING_SNAKE_CASE_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case__, safety_checker=snake_case__)

        SCREAMING_SNAKE_CASE_ = "Face of a yellow cat, high resolution, sitting on a park bench"

        # Deterministic seed; one sample per JAX device, 50 denoising steps.
        SCREAMING_SNAKE_CASE_ = jax.random.PRNGKey(0)
        SCREAMING_SNAKE_CASE_ = 50

        SCREAMING_SNAKE_CASE_ = jax.device_count()
        SCREAMING_SNAKE_CASE_ = num_samples * [prompt]
        SCREAMING_SNAKE_CASE_ = num_samples * [init_image]
        SCREAMING_SNAKE_CASE_ = num_samples * [mask_image]
        SCREAMING_SNAKE_CASE_ = pipeline.prepare_inputs(snake_case__, snake_case__, snake_case__)

        # shard inputs and rng
        SCREAMING_SNAKE_CASE_ = replicate(snake_case__)
        SCREAMING_SNAKE_CASE_ = jax.random.split(snake_case__, jax.device_count())
        SCREAMING_SNAKE_CASE_ = shard(snake_case__)
        SCREAMING_SNAKE_CASE_ = shard(snake_case__)
        SCREAMING_SNAKE_CASE_ = shard(snake_case__)

        SCREAMING_SNAKE_CASE_ = pipeline(
            snake_case__, snake_case__, snake_case__, snake_case__, snake_case__, snake_case__, jit=snake_case__
        )

        SCREAMING_SNAKE_CASE_ = output.images.reshape(snake_case__, 512, 512, 3)

        # Compare a fixed 3x3 corner slice of the first image with references.
        SCREAMING_SNAKE_CASE_ = images[0, 253:256, 253:256, -1]

        SCREAMING_SNAKE_CASE_ = jnp.asarray(jax.device_get(image_slice.flatten()))
        SCREAMING_SNAKE_CASE_ = jnp.array(
            [0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084]
        )
        print(F"output_slice: {output_slice}")

        assert jnp.abs(output_slice - expected_slice).max() < 1E-2
225
"""Megatron-BERT model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    """Configuration class storing the hyper-parameters of a Megatron-BERT model.

    Defaults correspond to a BERT-large-style Megatron checkpoint. Instantiating
    with no arguments yields a configuration with those defaults; all values are
    exposed as attributes for the model to read.
    """

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # The previous body bound every argument to a throwaway local instead of
        # `self.<attr>`, so the config carried no hyper-parameters; store them
        # as attributes (assignment order kept from the original).
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
59
0
"""simple docstring""" from __future__ import annotations import queue class snake_case : """simple docstring""" def __init__( self : Union[str, Any] ,lowerCamelCase__ : int ): UpperCAmelCase__ = data UpperCAmelCase__ = None UpperCAmelCase__ = None def a_ ( ): print('\n********Press N to stop entering at any point of time********\n' ) UpperCAmelCase__ = input('Enter the value of the root node: ' ).strip().lower() UpperCAmelCase__ = queue.Queue() UpperCAmelCase__ = TreeNode(int(__lowerCamelCase ) ) q.put(__lowerCamelCase ) while not q.empty(): UpperCAmelCase__ = q.get() UpperCAmelCase__ = f'''Enter the left node of {node_found.data}: ''' UpperCAmelCase__ = input(__lowerCamelCase ).strip().lower() or "n" if check == "n": return tree_node UpperCAmelCase__ = TreeNode(int(__lowerCamelCase ) ) UpperCAmelCase__ = left_node q.put(__lowerCamelCase ) UpperCAmelCase__ = f'''Enter the right node of {node_found.data}: ''' UpperCAmelCase__ = input(__lowerCamelCase ).strip().lower() or "n" if check == "n": return tree_node UpperCAmelCase__ = TreeNode(int(__lowerCamelCase ) ) UpperCAmelCase__ = right_node q.put(__lowerCamelCase ) raise def a_ ( lowerCamelCase ): if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not node: return print(node.data , end=',' ) pre_order(node.left ) pre_order(node.right ) def a_ ( lowerCamelCase ): if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not node: return in_order(node.left ) print(node.data , end=',' ) in_order(node.right ) def a_ ( lowerCamelCase ): if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=',' ) def a_ ( lowerCamelCase ): if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not node: return UpperCAmelCase__ = queue.Queue() q.put(__lowerCamelCase ) while not q.empty(): UpperCAmelCase__ = q.get() print(node_dequeued.data , end=',' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: 
q.put(node_dequeued.right ) def a_ ( lowerCamelCase ): if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not node: return UpperCAmelCase__ = queue.Queue() q.put(__lowerCamelCase ) while not q.empty(): UpperCAmelCase__ = [] while not q.empty(): UpperCAmelCase__ = q.get() print(node_dequeued.data , end=',' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__lowerCamelCase ) def a_ ( lowerCamelCase ): if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not node: return UpperCAmelCase__ = [] UpperCAmelCase__ = node while n or stack: while n: # start from root node, find its left child print(n.data , end=',' ) stack.append(__lowerCamelCase ) UpperCAmelCase__ = n.left # end of while means current node doesn't have left child UpperCAmelCase__ = stack.pop() # start to traverse its right child UpperCAmelCase__ = n.right def a_ ( lowerCamelCase ): if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not node: return UpperCAmelCase__ = [] UpperCAmelCase__ = node while n or stack: while n: stack.append(__lowerCamelCase ) UpperCAmelCase__ = n.left UpperCAmelCase__ = stack.pop() print(n.data , end=',' ) UpperCAmelCase__ = n.right def a_ ( lowerCamelCase ): if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not node: return UpperCAmelCase__ = [], [] UpperCAmelCase__ = node stacka.append(__lowerCamelCase ) while stacka: # to find the reversed order of post order, store it in stack2 UpperCAmelCase__ = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__lowerCamelCase ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=',' ) def a_ ( lowerCamelCase = "" , lowerCamelCase=5_0 , lowerCamelCase="*" ): if not s: return "\n" + width * char UpperCAmelCase__ = divmod(width - len(__lowerCamelCase ) - 2 , 2 ) return f'''{left * char} {s} {(left + extra) * char}''' if __name__ == 
"__main__": import doctest doctest.testmod() print(prompt('Binary Tree Traversals')) lowerCAmelCase__ : str = build_tree() print(prompt('Pre Order Traversal')) pre_order(node) print(prompt() + '\n') print(prompt('In Order Traversal')) in_order(node) print(prompt() + '\n') print(prompt('Post Order Traversal')) post_order(node) print(prompt() + '\n') print(prompt('Level Order Traversal')) level_order(node) print(prompt() + '\n') print(prompt('Actual Level Order Traversal')) level_order_actual(node) print('*' * 50 + '\n') print(prompt('Pre Order Traversal - Iteration Version')) pre_order_iter(node) print(prompt() + '\n') print(prompt('In Order Traversal - Iteration Version')) in_order_iter(node) print(prompt() + '\n') print(prompt('Post Order Traversal - Iteration Version')) post_order_iter(node) print(prompt())
98
import gc
import unittest

from parameterized import parameterized

from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp


@slow
@require_flax
class FlaxUNetaDConditionModelIntegrationTests(unittest.TestCase):
    """Integration tests comparing Flax UNet outputs against reference slices.

    Reference slices were recorded from the equivalent fp16 torch models
    (see the inline tolerance comments).
    """

    def get_file_format(self, seed, shape):
        """Name of the stored gaussian-noise fixture for a given seed/shape."""
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # Free device memory between tests.
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fpaa=False):
        """Load a pre-generated noise latent as a jnp array (bfloat16 when fpaa)."""
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fpaa=False, model_id="CompVis/stable-diffusion-v1-4"):
        """Load the Flax UNet (and params) from the hub; bf16 revision when fpaa."""
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        revision = "bf16" if fpaa else None

        model, params = FlaxUNetaDConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fpaa=False):
        """Load pre-generated text-encoder hidden states as a jnp array."""
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fpaa=True)
        latents = self.get_latents(seed, fpaa=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fpaa=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fpaa=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fpaa=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fpaa=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
59
0
"""simple docstring""" import math from numpy import inf from scipy.integrate import quad def _A (__a ) -> List[str]: """simple docstring""" if num <= 0: raise ValueError('''math domain error''' ) return quad(__lowerCamelCase , 0 , __lowerCamelCase , args=(__lowerCamelCase) )[0] def _A (__a , __a ) -> Any: """simple docstring""" return math.pow(__lowerCamelCase , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
91
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER on `result` and write them (and optionally all predictions) to files."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Lower-case and strip punctuation/extra whitespace from `text`.

    Must mirror the normalization used during training.
    """
    chars_to_ignore_regex = "[,?.!\-\;\:\"“%‘”�—’…–]"  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    # NOTE(review): the whitespace runs below were garbled in the reviewed copy;
    # restored as triple/double spaces — confirm against the original script.
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
59
0
"""simple docstring""" from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class A__ ( A_): A_ : Union[List[PIL.Image.Image], np.ndarray] A_ : Optional[List[bool]] if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
86
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
    FlaxCrossAttnDownBlockaD,
    FlaxCrossAttnUpBlockaD,
    FlaxDownBlockaD,
    FlaxUNetMidBlockaDCrossAttn,
    FlaxUpBlockaD,
)


@flax.struct.dataclass
class FlaxUNetaDConditionOutput(BaseOutput):
    """Output of FlaxUNetaDConditionModel: the predicted sample (noise)."""

    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNetaDConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    """Conditional 2D UNet (Flax) that denoises an encoded image latent,
    conditioned on timestep and encoder hidden states."""

    # Configuration fields; names are the ones read by setup()/init_weights().
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        """Initialize and return the model parameters using dummy inputs."""
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
        """Denoise `sample` conditioned on `timesteps` and `encoder_hidden_states`.

        Returns a FlaxUNetaDConditionOutput (or a 1-tuple if return_dict=False).
        """
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process: NCHW -> NHWC for Flax convolutions
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlockaD):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process: back to NCHW
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNetaDConditionOutput(sample=sample)
59
0
'''simple docstring''' from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : float | Decimal , snake_case_ : float = 10**-10 ) -> Tuple: '''simple docstring''' UpperCAmelCase_ = a while True: UpperCAmelCase_ = Decimal(__lowerCamelCase ) - ( Decimal(eval(__lowerCamelCase ) ) / Decimal(eval(str(diff(__lowerCamelCase ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(__lowerCamelCase ) ) < precision: # noqa: S307 return float(__lowerCamelCase ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}") # Find root of polynomial print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}") # Find Square Root of 5 print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}") # Exponential Roots print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
1
__lowerCamelCase = { "joule": 1.0, "kilojoule": 10_00, "megajoule": 1_00_00_00, "gigajoule": 10_00_00_00_00, "wattsecond": 1.0, "watthour": 36_00, "kilowatthour": 3_60_00_00, "newtonmeter": 1.0, "calorie_nutr": 41_86.8, "kilocalorie_nutr": 4_18_68_00.00, "electronvolt": 1.602_176_634e-19, "britishthermalunit_it": 10_55.0_55_85, "footpound": 1.35_5818, } def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : float ): if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: snake_case : List[Any] = ( f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n""" f"""Valid values are: {', '.join(__lowerCamelCase )}""" ) raise ValueError(__lowerCamelCase ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
59
0
import logging from transformers.configuration_utils import PretrainedConfig __A =logging.getLogger(__name__) class _SCREAMING_SNAKE_CASE ( A_ ): lowerCAmelCase__ = "masked_bert" def __init__( self , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=2 , lowercase=0.0_2 , lowercase=1e-12 , lowercase=0 , lowercase="topK" , lowercase="constant" , lowercase=0.0 , **lowercase , ) -> Tuple: super().__init__(pad_token_id=snake_case__ , **snake_case__ ) lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = hidden_act lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = initializer_range lowerCamelCase_ = layer_norm_eps lowerCamelCase_ = pruning_method lowerCamelCase_ = mask_init lowerCamelCase_ = mask_scale
19
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import datasets
import numpy as np
import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    PreTrainedTokenizer,
    TFAutoModelForSequenceClassification,
    TFTrainer,
    TFTrainingArguments,
)
from transformers.utils import logging as hf_logging


hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Load CSV files into tokenized tf.data datasets.

    Returns (train_ds, val_ds, test_ds, label2id); a split is None when its
    file was not provided.
    """
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id


logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """Arguments describing the data files to train/evaluate on."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results


if __name__ == "__main__":
    main()
59
0
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream

if TYPE_CHECKING:
    # Fix: the original imported the non-existent module ``sqlitea`` (typo of sqlite3).
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    """Build a :class:`Dataset` from a SQL query or table name via the packaged ``Sql`` builder."""

    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        """Download/prepare the builder and return the materialized ``train`` split."""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    """Write a :class:`Dataset` to a SQL table in batches, optionally with a process pool."""

    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        """Write the whole dataset and return the number of rows written."""
        # ``sql`` and ``con`` are fixed by the constructor; drop stray copies from kwargs
        # so they are not forwarded to pandas' ``to_sql``.
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args) -> int:
        # One unit of work: write rows [offset, offset + batch_size) using pandas ``to_sql``.
        offset, index, to_sql_kwargs = args
        # Only the first batch may create/replace the table; every later batch must append.
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        # ``to_sql`` may return None depending on the backend; fall back to the frame length.
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Dispatch the batched writes serially or through a multiprocessing pool."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_written in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_written

        return written


# Backward-compatible alias: the previous (name-mangled) module bound both classes to
# ``UpperCAmelCase``; the writer was the surviving (last) binding.
UpperCAmelCase = SqlDatasetWriter
15
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class UpperCAmelCase ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE (self : Any ) -> List[str]: '''simple docstring''' snake_case : int = tempfile.mkdtemp() # fmt: off snake_case : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"] # fmt: on snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) snake_case : int = { "do_resize": True, "size": {"height": 18, "width": 18}, "do_normalize": True, "image_mean": [0.5, 0.5, 0.5], "image_std": [0.5, 0.5, 0.5], } snake_case : Optional[Any] = os.path.join(self.tmpdirname , snake_case__ ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Optional[Any] , **snake_case__ : str ) -> Optional[int]: '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Optional[Any] , **snake_case__ : List[str] ) -> int: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _SCREAMING_SNAKE_CASE (self : Dict ) -> str: '''simple docstring''' snake_case : 
List[Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] snake_case : Optional[int] = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' snake_case : Dict = self.get_tokenizer() snake_case : Optional[Any] = self.get_image_processor() snake_case : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) processor.save_pretrained(self.tmpdirname ) snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]: '''simple docstring''' snake_case : str = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case : Optional[int] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) snake_case : Tuple = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 ) snake_case : List[str] = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> int: '''simple docstring''' 
snake_case : str = self.get_image_processor() snake_case : Optional[int] = self.get_tokenizer() snake_case : List[Any] = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) snake_case : Optional[Any] = self.prepare_image_inputs() snake_case : str = image_processor(snake_case__ , return_tensors="np" ) snake_case : Any = processor(images=snake_case__ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]: '''simple docstring''' snake_case : Dict = self.get_image_processor() snake_case : int = self.get_tokenizer() snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) snake_case : Tuple = "lower newer" snake_case : Tuple = processor(text=snake_case__ ) snake_case : Union[str, Any] = tokenizer(snake_case__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _SCREAMING_SNAKE_CASE (self : Dict ) -> Optional[int]: '''simple docstring''' snake_case : List[Any] = self.get_image_processor() snake_case : Dict = self.get_tokenizer() snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) snake_case : int = "lower newer" snake_case : Dict = self.prepare_image_inputs() snake_case : Union[str, Any] = processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with self.assertRaises(snake_case__ ): processor() def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple: '''simple docstring''' snake_case : Tuple = self.get_image_processor() snake_case : Optional[Any] = self.get_tokenizer() snake_case : Tuple = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) 
snake_case : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] snake_case : List[Any] = processor.batch_decode(snake_case__ ) snake_case : Union[str, Any] = tokenizer.batch_decode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[str]: '''simple docstring''' snake_case : str = self.get_image_processor() snake_case : Union[str, Any] = self.get_tokenizer() snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) snake_case : Optional[Any] = "lower newer" snake_case : List[Any] = self.prepare_image_inputs() snake_case : Tuple = processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
59
0
"""simple docstring""" import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel UpperCAmelCase : List[Any] = HfApi() UpperCAmelCase : Optional[int] = {} # fmt: off UpperCAmelCase : int = torch.tensor([ -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467, 1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189, -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839, 0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557 ]) UpperCAmelCase : Optional[Any] = torch.tensor([ -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436, 1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208, -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948, 2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365 ]) UpperCAmelCase : str = torch.tensor([ -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869, -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304, -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925, 0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943 ]) UpperCAmelCase : Dict = torch.tensor([ 0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172, -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309, 0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805, -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505 ]) UpperCAmelCase : Dict = torch.tensor([ 0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133, -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395, 0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559, -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386 ]) UpperCAmelCase : Optional[Any] = torch.tensor([ 0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078, -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330, 0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683, -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431 ]) 
UpperCAmelCase : List[Any] = torch.tensor([ 0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042, -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398, 0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574, -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390 ]) UpperCAmelCase : int = torch.tensor([ 0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042, -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290, 0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746, -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473 ]) UpperCAmelCase : Any = torch.tensor([ -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330, 1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243, -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810, 1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251]) UpperCAmelCase : Union[str, Any] = torch.tensor([ -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324, 0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181, -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259, 1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266 ]) UpperCAmelCase : List[Any] = torch.tensor([ -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212, 0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027, -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131, 1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355 ]) UpperCAmelCase : str = torch.tensor([ -2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959, 1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351, -3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341, 3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066 ]) UpperCAmelCase : Optional[Any] = torch.tensor([ -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740, 1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398, -2.8153, -3.6140, 
2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395, 2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243 ]) UpperCAmelCase : Optional[Any] = torch.tensor([ -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336, 1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908, -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560, 3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343 ]) UpperCAmelCase : Optional[int] = torch.tensor([ -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344, 1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391, -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439, 1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219 ]) # fmt: on UpperCAmelCase : Union[str, Any] = api.list_models(filter='diffusers') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": UpperCAmelCase : int = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1] print(F"Started running {mod.modelId}!!!") if mod.modelId.startswith('CompVis'): UpperCAmelCase : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet') else: UpperCAmelCase : Dict = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) UpperCAmelCase : Optional[int] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) UpperCAmelCase : Dict = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): UpperCAmelCase : int = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1E-3 ) print(F"{mod.modelId} has passed successfully!!!")
115
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Lazy-import structure: maps submodule name -> public names it provides.
# Fix: the original bound this dict to a throwaway name and then overwrote it with the
# modeling-class list, so the config/tokenizer entries were lost and the
# ``_import_structure`` name passed to ``_LazyModule`` was never defined (NameError).
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is installed: expose the PyTorch model classes as well.
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see real (eager) imports.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
59
0
"""Split text into one sentence per line using NLTK's punkt tokenizer."""
import re

from filelock import FileLock

try:
    import nltk

    # Fix: the original bound these flags to a mangled throwaway name while the
    # code below tested ``NLTK_AVAILABLE``, which was never defined.
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # Download the punkt sentence tokenizer once; the file lock keeps concurrent
    # workers from racing on the download.
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def UpperCamelCase(_lowerCamelCase: str):
    """Return *_lowerCamelCase* split into sentences, joined by newlines.

    The pegasus ``<n>`` newline marker is stripped before sentence splitting.

    Raises:
        AssertionError: if nltk is not installed.
    """
    # Fix: ``re.sub`` returns a new string; the original discarded the result, so
    # the "<n>" marker was never actually removed. Also the original referenced an
    # undefined name instead of the parameter.
    _lowerCamelCase = re.sub("<n>", "", _lowerCamelCase)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(_lowerCamelCase))
237
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase : def __init__(self : Dict , snake_case__ : Dict , snake_case__ : Any=13 , snake_case__ : Any=32 , snake_case__ : Optional[Any]=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : List[Any]=16 , snake_case__ : int=[1, 2, 1] , snake_case__ : Dict=[2, 2, 4] , snake_case__ : Dict=2 , snake_case__ : Tuple=2.0 , snake_case__ : Optional[int]=True , snake_case__ : Union[str, Any]=0.0 , snake_case__ : Any=0.0 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int="gelu" , snake_case__ : Optional[int]=False , snake_case__ : List[Any]=True , snake_case__ : List[str]=0.02 , snake_case__ : int=1e-5 , snake_case__ : List[str]=True , snake_case__ : Union[str, Any]=None , snake_case__ : List[Any]=True , snake_case__ : Optional[Any]=10 , snake_case__ : Optional[Any]=8 , snake_case__ : Any=["stage1", "stage2", "stage3"] , snake_case__ : Tuple=[1, 2, 3] , ) -> Union[str, Any]: '''simple docstring''' snake_case : Any = parent snake_case : Optional[int] = batch_size snake_case : Union[str, Any] = image_size snake_case : Dict = patch_size snake_case : Optional[Any] = num_channels snake_case : Union[str, Any] = embed_dim snake_case : int = depths snake_case : List[str] = num_heads snake_case : Union[str, Any] = window_size snake_case : Union[str, Any] = mlp_ratio snake_case : 
List[Any] = qkv_bias snake_case : List[Any] = hidden_dropout_prob snake_case : Union[str, Any] = attention_probs_dropout_prob snake_case : Union[str, Any] = drop_path_rate snake_case : int = hidden_act snake_case : Optional[int] = use_absolute_embeddings snake_case : int = patch_norm snake_case : Union[str, Any] = layer_norm_eps snake_case : Any = initializer_range snake_case : Optional[Any] = is_training snake_case : Tuple = scope snake_case : Optional[int] = use_labels snake_case : Optional[Any] = type_sequence_label_size snake_case : Union[str, Any] = encoder_stride snake_case : Any = out_features snake_case : Tuple = out_indices def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict: '''simple docstring''' snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case : int = None if self.use_labels: snake_case : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case : Dict = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE (self : List[str] ) -> int: '''simple docstring''' return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Tuple ) -> Optional[Any]: '''simple docstring''' 
snake_case : Union[str, Any] = MaskFormerSwinModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() snake_case : List[Any] = model(snake_case__ ) snake_case : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case : int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] ) -> str: '''simple docstring''' snake_case : Optional[int] = MaskFormerSwinBackbone(config=snake_case__ ) model.to(snake_case__ ) model.eval() snake_case : List[Any] = model(snake_case__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(snake_case__ ): snake_case : Tuple = ["stem"] snake_case : List[Any] = MaskFormerSwinBackbone(config=snake_case__ ) def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]: '''simple docstring''' snake_case : Union[str, Any] = self.prepare_config_and_inputs() snake_case , snake_case , snake_case : List[Any] = config_and_inputs snake_case : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase ( A_ ,A_ ,unittest.TestCase ): A__ : List[str] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) A__ : str = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} A__ : Optional[Any] = False A__ : List[Any] = False A__ : List[str] = False A__ : List[str] = False A__ : Union[str, Any] = False def _SCREAMING_SNAKE_CASE (self : 
Optional[int] ) -> List[str]: '''simple docstring''' snake_case : str = MaskFormerSwinModelTester(self ) snake_case : Optional[int] = ConfigTester(self , config_class=snake_case__ , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" ) ) def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[Any]: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE (self : Tuple ) -> List[Any]: '''simple docstring''' return def _SCREAMING_SNAKE_CASE (self : Dict ) -> str: '''simple docstring''' snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _SCREAMING_SNAKE_CASE (self : int ) -> Dict: '''simple docstring''' snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*snake_case__ ) @unittest.skip("Swin does not use inputs_embeds" ) def _SCREAMING_SNAKE_CASE (self : int ) -> Any: '''simple docstring''' pass @unittest.skip("Swin does not support feedforward chunking" ) def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Dict: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[str]: '''simple docstring''' snake_case , snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case : int = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() 
, (nn.Module) ) snake_case : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict: '''simple docstring''' snake_case , snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case : str = model_class(snake_case__ ) snake_case : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case : Optional[Any] = [*signature.parameters.keys()] snake_case : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" ) def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> str: '''simple docstring''' pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" ) def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Any: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Tuple ) -> Optional[int]: '''simple docstring''' snake_case : Tuple = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): snake_case : Any = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) snake_case : int = outputs.hidden_states snake_case : Union[str, Any] = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(snake_case__ ) , snake_case__ ) # Swin has a different seq_length snake_case : Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, 
self.model_tester.embed_dim] , ) def _SCREAMING_SNAKE_CASE (self : Dict ) -> Union[str, Any]: '''simple docstring''' snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case : int = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case : int = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case : Dict = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE (self : int ) -> Any: '''simple docstring''' snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case : Any = 3 snake_case : List[str] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case : str = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case : Optional[Any] = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , (padded_height, padded_width) ) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" ) def _SCREAMING_SNAKE_CASE 
(self : Optional[int] ) -> str: '''simple docstring''' pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def _SCREAMING_SNAKE_CASE (self : str ) -> int: '''simple docstring''' pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def _SCREAMING_SNAKE_CASE (self : int ) -> str: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : Any ) -> Any: '''simple docstring''' snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(snake_case__ : Union[str, Any] ): snake_case : Any = 0 return t def check_equivalence(snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[int]={} ): with torch.no_grad(): snake_case : Optional[Any] = model(**snake_case__ , return_dict=snake_case__ , **snake_case__ ) snake_case : Tuple = model(**snake_case__ , return_dict=snake_case__ , **snake_case__ ).to_tuple() def recursive_check(snake_case__ : List[str] , snake_case__ : Optional[Any] ): if isinstance(snake_case__ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(snake_case__ , snake_case__ ): recursive_check(snake_case__ , snake_case__ ) elif isinstance(snake_case__ , snake_case__ ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(snake_case__ , snake_case__ ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(snake_case__ ) , set_nan_tensor_to_zero(snake_case__ ) , atol=1e-5 ) , msg=( "Tuple and dict output are not equal. Difference:" f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" f""" {torch.isnan(snake_case__ ).any()} and `inf`: {torch.isinf(snake_case__ )}. 
Dict has""" f""" `nan`: {torch.isnan(snake_case__ ).any()} and `inf`: {torch.isinf(snake_case__ )}.""" ) , ) recursive_check(snake_case__ , snake_case__ ) for model_class in self.all_model_classes: snake_case : Optional[int] = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() snake_case : Union[str, Any] = self._prepare_for_class(snake_case__ , snake_case__ ) snake_case : Tuple = self._prepare_for_class(snake_case__ , snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ ) snake_case : Tuple = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) snake_case : Optional[Any] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ ) snake_case : Dict = self._prepare_for_class(snake_case__ , snake_case__ ) snake_case : List[Any] = self._prepare_for_class(snake_case__ , snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ , {"output_hidden_states": True} ) snake_case : Any = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) snake_case : List[str] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ , {"output_hidden_states": True} ) @require_torch class UpperCAmelCase ( unittest.TestCase ,A_ ): A__ : int = (MaskFormerSwinBackbone,) if is_torch_available() else () A__ : int = MaskFormerSwinConfig def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Any: '''simple docstring''' snake_case : Union[str, Any] = MaskFormerSwinModelTester(self ) def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[Any]: '''simple docstring''' snake_case , snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() snake_case : Optional[int] = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: snake_case : Optional[int] = 
backbone_class(snake_case__ ) backbone.to(snake_case__ ) backbone.eval() snake_case : Union[str, Any] = backbone(**snake_case__ ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , snake_case__ ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True snake_case : Optional[int] = backbone(**snake_case__ , output_hidden_states=snake_case__ ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) snake_case , snake_case , snake_case : Dict = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: snake_case : Optional[Any] = backbone(**snake_case__ , output_attentions=snake_case__ ) self.assertIsNotNone(outputs.attentions )
59
0
import random
import unittest

import numpy as np

import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax


if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit

    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

# XLA client memory fraction; assumed parallelism: 8.
# NOTE(review): upstream writes this into os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"];
# here it was mangled into a bare constant — confirm before relying on it.
__lowercase = "0.12"

if is_torch_available():
    import torch


def ids_tensor(shape, vocab_size, rng=None):
    """Return an int32 numpy array of `shape` filled with random token ids in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = [rng.randint(0, vocab_size - 1) for _ in range(total_dims)]
    # was `jnp.intaa` in the mangled source; int32 is the working dtype
    return np.array(values, dtype=jnp.int32).reshape(shape)


def random_attention_mask(shape, rng=None):
    """Return a random 0/1 attention mask of `shape`."""
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask


@require_flax
class FlaxGenerationTesterMixin:
    # Set by the concrete test class.
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        """Return (config, input_ids, attention_mask, max_length) shrunk for fast generation tests."""
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        # NOTE(review): exception type reconstructed as ValueError — confirm against `generate()`
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
43
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def make_atomaa_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Build dense(14-atom) <-> sparse(37-atom) index maps and existence masks per residue.

    Lookup tables are built once per residue type, then gathered per residue with
    ``protein["aatype"]``.  The gathered tensors are stored back into ``protein``
    (which is also returned) — in the mangled original they were computed and then
    discarded into throwaway locals, leaving the input unchanged.
    """
    restype_atomaa_to_atomaa = []  # per restype: 14 slots -> atom37 index
    restype_atomaa_from_atomaa = []  # per restype: 37 slots -> atom14 index
    restype_atomaa_mask = []  # per restype: 14 slots -> 1.0 if the slot holds a real atom

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atomaa_to_atomaa.append([(rc.atom_order[name] if name else 0) for name in atom_names])

        atom_name_to_idxaa = {name: i for i, name in enumerate(atom_names)}
        restype_atomaa_from_atomaa.append(
            [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types]
        )
        restype_atomaa_mask.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atomaa_to_atomaa.append([0] * 14)
    restype_atomaa_from_atomaa.append([0] * 37)
    restype_atomaa_mask.append([0.0] * 14)

    device = protein["aatype"].device
    # was `torch.intaa` / `torch.floataa` in the mangled source; int32/float32 are the real dtypes
    restype_atomaa_to_atomaa = torch.tensor(restype_atomaa_to_atomaa, dtype=torch.int32, device=device)
    restype_atomaa_from_atomaa = torch.tensor(restype_atomaa_from_atomaa, dtype=torch.int32, device=device)
    restype_atomaa_mask = torch.tensor(restype_atomaa_mask, dtype=torch.float32, device=device)
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atomaa_to_atomaa = restype_atomaa_to_atomaa[protein_aatype]
    residx_atomaa_mask = restype_atomaa_mask[protein_aatype]

    # NOTE(review): dict key names reconstructed from the upstream (OpenFold-style)
    # implementation — confirm against the consumers of this dict.
    protein["atom14_atom_exists"] = residx_atomaa_mask
    protein["residx_atom14_to_atom37"] = residx_atomaa_to_atomaa.long()

    # create the gather indices for mapping back
    residx_atomaa_from_atomaa = restype_atomaa_from_atomaa[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atomaa_from_atomaa.long()

    # create the corresponding (residx, atom37) existence mask
    restype_atomaa_full_mask = torch.zeros([21, 37], dtype=torch.float32, device=device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_atoa[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atomaa_full_mask[restype, atom_type] = 1

    protein["atom37_atom_exists"] = restype_atomaa_full_mask[protein_aatype]

    return protein


def make_atomaa_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    """Numpy wrapper: convert ndarray leaves to tensors, run make_atomaa_masks, convert back."""
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atomaa_masks(batch))
    return out
59
0
"""simple docstring""" import os import numpy import onnx def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : str ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =a.name lowerCamelCase__ : Optional[Any] =b.name lowerCamelCase__ : Any ="" lowerCamelCase__ : str ="" lowerCamelCase__ : str =a == b lowerCamelCase__ : Optional[int] =name_a lowerCamelCase__ : int =name_b return res def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] ): """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(__lowerCamelCase , __lowerCamelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , __lowerCamelCase , __lowerCamelCase ) _graph_replace_input_with(node_proto.attribute[1].g , __lowerCamelCase , __lowerCamelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , __lowerCamelCase , __lowerCamelCase ) def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ): """simple docstring""" for n in graph_proto.node: _node_replace_input_with(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : List[Any] ): """simple docstring""" lowerCamelCase__ : Optional[int] =list(model.graph.initializer ) lowerCamelCase__ : Optional[Any] =list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i lowerCamelCase__ : Union[str, Any] =inits[i].name lowerCamelCase__ : Any =inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , __lowerCamelCase , __lowerCamelCase ) def snake_case__ 
( __lowerCamelCase : List[Any] ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =os.path.dirname(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =os.path.basename(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =onnx.load(os.path.join(__lowerCamelCase , __lowerCamelCase ) ) lowerCamelCase__ : Optional[Any] =list(model.graph.initializer ) lowerCamelCase__ : Any =set() lowerCamelCase__ : Optional[Any] ={} lowerCamelCase__ : Tuple =[] lowerCamelCase__ : str =0 for i in range(len(__lowerCamelCase ) ): if i in dup_set: continue for j in range(i + 1 , len(__lowerCamelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(__lowerCamelCase ) dup_set.add(__lowerCamelCase ) lowerCamelCase__ : List[Any] =inits[j].data_type lowerCamelCase__ : str =numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('''unexpected data type: ''' , __lowerCamelCase ) total_reduced_size += mem_size lowerCamelCase__ : Optional[int] =inits[i].name lowerCamelCase__ : Optional[int] =inits[j].name if name_i in dup_map: dup_map[name_i].append(__lowerCamelCase ) else: lowerCamelCase__ : Any =[name_j] ind_to_replace.append((j, i) ) print('''total reduced size: ''' , total_reduced_size / 1024 / 1024 / 1024 , '''GB''' ) lowerCamelCase__ : Tuple =sorted(__lowerCamelCase ) _remove_dup_initializers_from_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : int ="optimized_" + model_file_name lowerCamelCase__ : str =os.path.join(__lowerCamelCase , __lowerCamelCase ) onnx.save(__lowerCamelCase , __lowerCamelCase ) return new_model
238
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    """Fast LXMERT tokenizer backed by the HuggingFace *tokenizers* library (BERT-style WordPiece)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        # The mangled original declared every parameter with the same name
        # (`snake_case__`), which is a SyntaxError; names restored per the slow
        # tokenizer's signature.
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # Rebuild the backend normalizer so it matches the requested options;
            # the mangled original discarded these values into throwaway locals.
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs as `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token type ids: 0 for `[CLS] A [SEP]`, 1 for `B [SEP]` (if present)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        # mangled original reused the first sequence here; the second segment must
        # be measured with token_ids_1
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's model files into `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
59
0
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : int=32 , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : Tuple=10 , _lowerCAmelCase : Dict=[10, 20, 30, 40] , _lowerCAmelCase : List[Any]=[1, 1, 2, 1] , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Tuple="relu" , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : Any=None , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = embeddings_size SCREAMING_SNAKE_CASE_ = hidden_sizes SCREAMING_SNAKE_CASE_ = depths SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = num_labels SCREAMING_SNAKE_CASE_ = scope SCREAMING_SNAKE_CASE_ = len(snake_case__ ) def lowerCAmelCase_ ( self : Dict ): SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE_ = None if self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_labels ) 
SCREAMING_SNAKE_CASE_ = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self : Tuple ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Dict ): SCREAMING_SNAKE_CASE_ = TFResNetModel(config=snake_case__ ) SCREAMING_SNAKE_CASE_ = model(snake_case__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : str ): SCREAMING_SNAKE_CASE_ = self.num_labels SCREAMING_SNAKE_CASE_ = TFResNetForImageClassification(snake_case__ ) SCREAMING_SNAKE_CASE_ = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : str ): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ = config_and_inputs SCREAMING_SNAKE_CASE_ = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class lowerCamelCase_ ( A_ , A_ , unittest.TestCase ): '''simple docstring''' lowercase_ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () lowercase_ = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def lowerCAmelCase_ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE_ = TFResNetModelTester(self ) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=snake_case__ , 
has_text_modality=snake_case__ ) def lowerCAmelCase_ ( self : int ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase_ ( self : str ): return @unittest.skip(reason='ResNet does not use inputs_embeds' ) def lowerCAmelCase_ ( self : Tuple ): pass @unittest.skip(reason='ResNet does not support input and output embeddings' ) def lowerCAmelCase_ ( self : Optional[Any] ): pass def lowerCAmelCase_ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = model_class(snake_case__ ) SCREAMING_SNAKE_CASE_ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def lowerCAmelCase_ ( self : Any ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def lowerCAmelCase_ ( self : Optional[int] ): def check_hidden_states_output(_lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[str] ): SCREAMING_SNAKE_CASE_ = model_class(snake_case__ ) SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE_ = self.model_tester.num_stages self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, 
num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: SCREAMING_SNAKE_CASE_ = layer_type SCREAMING_SNAKE_CASE_ = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE_ = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) def lowerCAmelCase_ ( self : List[Any] ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) @slow def lowerCAmelCase_ ( self : Any ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ = TFResNetModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def UpperCAmelCase_ ( ) -> List[str]: SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : int ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): SCREAMING_SNAKE_CASE_ = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) SCREAMING_SNAKE_CASE_ = self.default_image_processor SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(images=snake_case__ , return_tensors='tf' ) # forward pass SCREAMING_SNAKE_CASE_ = model(**snake_case__ ) # verify the logits SCREAMING_SNAKE_CASE_ = tf.TensorShape((1, 1_000) ) 
self.assertEqual(outputs.logits.shape , snake_case__ ) SCREAMING_SNAKE_CASE_ = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case__ , atol=1E-4 ) )
225
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    """Unit tests for ``DDIMParallelScheduler``.

    NOTE(review): the original block assigned both class attributes to the same
    mangled name and defined every method as ``_SCREAMING_SNAKE_CASE``, so each
    definition silently overwrote the previous one and the helpers the bodies
    call (``self.scheduler_classes``, ``self.get_scheduler_config``,
    ``self.full_loop``) did not exist.  Names restored to the conventional
    scheduler-test layout used by ``SchedulerCommonTest``.
    """

    # Scheduler implementation(s) exercised by the common test harness.
    scheduler_classes = (DDIMParallelScheduler,)
    # Default keyword args forwarded by the harness's check_over_forward().
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        """Return a baseline scheduler config; overrides applied via kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config_kwargs):
        """Run a full deterministic denoising loop; return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config_kwargs)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        # With an offset of 1 every timestep is shifted up by one.
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Reference variances for the linear beta schedule defined above.
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        # Batch three samples with three different timesteps per sample row.
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(
            residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta
        )

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # A different beta_start so that the first alpha differs from default.
        # NOTE(review): True/False below restored from the paired expected
        # sums (149.8295 vs 149.0784) — confirm against upstream history.
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
59
0
"""simple docstring""" import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ : Tuple = logging.get_logger(__name__) lowerCAmelCase__ : Dict = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } lowerCAmelCase__ : str = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } lowerCAmelCase__ : int = {'facebook/blenderbot_small-90M': 512} def a_ ( lowerCamelCase ): UpperCAmelCase__ = set() UpperCAmelCase__ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase__ = char UpperCAmelCase__ = set(__lowerCamelCase ) return pairs class snake_case ( A_ ): """simple docstring""" snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Dict="__start__" ,lowerCamelCase__ : Any="__end__" ,lowerCamelCase__ : Tuple="__unk__" ,lowerCamelCase__ : Dict="__null__" ,**lowerCamelCase__ : List[Any] ,): super().__init__(unk_token=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,pad_token=snake_case__ ,**snake_case__ ) with open(snake_case__ ,encoding='utf-8' ) as vocab_handle: UpperCAmelCase__ = json.load(snake_case__ ) UpperCAmelCase__ = {v: k for k, v in self.encoder.items()} with open(snake_case__ ,encoding='utf-8' ) as merges_handle: UpperCAmelCase__ = 
merges_handle.read().split('\n' )[1:-1] UpperCAmelCase__ = [tuple(merge.split() ) for merge in merges] UpperCAmelCase__ = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) ) UpperCAmelCase__ = {} @property def __lowerCAmelCase ( self : str ): return len(self.encoder ) def __lowerCAmelCase ( self : Dict ): return dict(self.encoder ,**self.added_tokens_encoder ) def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : str ): if token in self.cache: return self.cache[token] UpperCAmelCase__ = re.sub('([.,!?()])' ,R' \1' ,snake_case__ ) UpperCAmelCase__ = re.sub('(\')' ,R' \1 ' ,snake_case__ ) UpperCAmelCase__ = re.sub(R'\s{2,}' ,' ' ,snake_case__ ) if "\n" in token: UpperCAmelCase__ = token.replace('\n' ,' __newln__' ) UpperCAmelCase__ = token.split(' ' ) UpperCAmelCase__ = [] for token in tokens: if not len(snake_case__ ): continue UpperCAmelCase__ = token.lower() UpperCAmelCase__ = tuple(snake_case__ ) UpperCAmelCase__ = tuple(list(word[:-1] ) + [word[-1] + '</w>'] ) UpperCAmelCase__ = get_pairs(snake_case__ ) if not pairs: words.append(snake_case__ ) continue while True: UpperCAmelCase__ = min(snake_case__ ,key=lambda lowerCamelCase__ : self.bpe_ranks.get(snake_case__ ,float('inf' ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase__ = bigram UpperCAmelCase__ = [] UpperCAmelCase__ = 0 while i < len(snake_case__ ): try: UpperCAmelCase__ = word.index(snake_case__ ,snake_case__ ) new_word.extend(word[i:j] ) UpperCAmelCase__ = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase__ = tuple(snake_case__ ) UpperCAmelCase__ = new_word if len(snake_case__ ) == 1: break else: UpperCAmelCase__ = get_pairs(snake_case__ ) UpperCAmelCase__ = "@@ ".join(snake_case__ ) UpperCAmelCase__ = word[:-4] UpperCAmelCase__ = word words.append(snake_case__ ) return " ".join(snake_case__ ) def __lowerCAmelCase ( 
self : Tuple ,lowerCamelCase__ : str ): UpperCAmelCase__ = [] UpperCAmelCase__ = re.findall(R'\S+\n?' ,snake_case__ ) for token in words: split_tokens.extend(list(self.bpe(snake_case__ ).split(' ' ) ) ) return split_tokens def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : str ): UpperCAmelCase__ = token.lower() return self.encoder.get(snake_case__ ,self.encoder.get(self.unk_token ) ) def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : int ): return self.decoder.get(snake_case__ ,self.unk_token ) def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str] ): UpperCAmelCase__ = " ".join(snake_case__ ).replace('@@ ' ,'' ).strip() return out_string def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ): if not os.path.isdir(snake_case__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase__ = os.path.join( snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) UpperCAmelCase__ = os.path.join( snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(snake_case__ ,'w' ,encoding='utf-8' ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=snake_case__ ,ensure_ascii=snake_case__ ) + '\n' ) UpperCAmelCase__ = 0 with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCamelCase__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) UpperCAmelCase__ = token_index writer.write(' '.join(snake_case__ ) + '\n' ) index += 1 return vocab_file, merge_file
98
def encrypt(input_string: str, key: int) -> str:
    """Encrypt *input_string* with the rail-fence (zigzag) cipher.

    Characters are written along a zigzag over *key* rows, then read row by
    row.  Raises ValueError for key <= 0; a key of 1 (or a key >= the string
    length) leaves the text unchanged.

    NOTE(review): the original block defined all three functions under one
    name with duplicated parameter names (a SyntaxError) and called an
    undefined ``decrypt``; names restored.
    """
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    lowest = key - 1  # index of the bottom rail
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds of one zigzag period
        num = min(num, lowest * 2 - num)  # reflects the descending half back up
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    return "".join(grid)


def decrypt(input_string: str, key: int) -> str:
    """Invert :func:`encrypt` for the same *key*.

    Rebuilds the zigzag template to learn each rail's length, fills the rails
    from the ciphertext, then reads the characters back in zigzag order.
    """
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    lowest = key - 1
    # Template marks how many characters each rail holds.
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    for position in range(len(input_string)):
        num = position % (lowest * 2)
        num = min(num, lowest * 2 - num)
        temp_grid[num].append("*")

    # Slice the ciphertext into the rails according to the template.
    grid = []
    counter = 0
    for row in temp_grid:
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    # Read back in zigzag order, consuming each rail front-to-back.
    output_string = ""
    for position in range(len(input_string)):
        num = position % (lowest * 2)
        num = min(num, lowest * 2 - num)
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Try every key from 1 to len(input_string) - 1; map key -> decryption."""
    results = {}
    for key_guess in range(1, len(input_string)):
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
59
0
"""simple docstring""" import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer UpperCAmelCase_ : Any = logging.getLogger(__name__) def _A () -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = argparse.ArgumentParser( description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' ) parser.add_argument( '''--dataset_name''' , type=__lowerCamelCase , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , ) parser.add_argument( '''--dataset_config''' , type=__lowerCamelCase , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' ) parser.add_argument( '''--tokenizer_name_or_path''' , type=__lowerCamelCase , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , ) parser.add_argument( '''--shard_size''' , type=__lowerCamelCase , default=10_00 , help='''Number of entries to go in a single shard.''' , ) parser.add_argument('''--split''' , type=__lowerCamelCase , default='''train''' , choices=['''train''', '''test''', '''validation'''] ) parser.add_argument( '''--limit''' , default=__lowerCamelCase , type=__lowerCamelCase , help='''Limit the number of shards (used for debugging).''' , ) parser.add_argument( '''--max_length''' , type=__lowerCamelCase , default=5_12 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum''' ''' sequence length that is a multiple of 8.''' , ) parser.add_argument( '''--output_dir''' , default='''tf-tpu''' , type=__lowerCamelCase , help='''Output directory where the TFRecord shards will be saved. 
If the''' ''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord''' ''' shards will be directly saved to a Google Cloud Storage bucket.''' , ) SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args() return args def _A (__a ) -> Any: """simple docstring""" def fn(__a ): return tokenizer(examples['''text'''] ) return fn def _A (__a ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = [] for i in range(len(tokenized_data['''input_ids'''] ) ): SCREAMING_SNAKE_CASE_ : str = { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ), } SCREAMING_SNAKE_CASE_ : Optional[int] = tf.train.Features(feature=__lowerCamelCase ) SCREAMING_SNAKE_CASE_ : List[str] = tf.train.Example(features=__lowerCamelCase ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = example.SerializeToString() records.append(__lowerCamelCase ) return records def _A (__a ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: SCREAMING_SNAKE_CASE_ : Optional[Any] = min(len(__lowerCamelCase ) , args.limit ) SCREAMING_SNAKE_CASE_ : Tuple = dataset.select(range(__lowerCamelCase ) ) print(f'Limiting the dataset to {args.limit} entries.' ) SCREAMING_SNAKE_CASE_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. 
if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join(args.output_dir , args.split ) if not os.path.exists(__lowerCamelCase ): os.makedirs(__lowerCamelCase ) else: SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. SCREAMING_SNAKE_CASE_ : Any = tokenize_function(__lowerCamelCase ) SCREAMING_SNAKE_CASE_ : Optional[Any] = dataset.map(__lowerCamelCase , batched=__lowerCamelCase , num_proc=4 , remove_columns=['''text'''] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(__a ): # Concatenate all texts. SCREAMING_SNAKE_CASE_ : Optional[Any] = {k: sum(examples[k] , [] ) for k in examples.keys()} SCREAMING_SNAKE_CASE_ : str = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 SCREAMING_SNAKE_CASE_ : Any = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. 
SCREAMING_SNAKE_CASE_ : Union[str, Any] = { k: [t[i : i + args.max_length] for i in range(0 , __lowerCamelCase , args.max_length )] for k, t in concatenated_examples.items() } return result SCREAMING_SNAKE_CASE_ : Any = dataset_tokenized.map(__lowerCamelCase , batched=__lowerCamelCase , batch_size=10_00 , num_proc=4 ) SCREAMING_SNAKE_CASE_ : Dict = 0 SCREAMING_SNAKE_CASE_ : Optional[int] = 0 for shard in range(0 , len(__lowerCamelCase ) , args.shard_size ): SCREAMING_SNAKE_CASE_ : int = grouped_dataset[shard : shard + args.shard_size] SCREAMING_SNAKE_CASE_ : Any = len(dataset_snapshot['''input_ids'''] ) SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join(__lowerCamelCase , f'dataset-{shard_count}-{records_containing}.tfrecord' ) SCREAMING_SNAKE_CASE_ : Tuple = get_serialized_examples(__lowerCamelCase ) with tf.io.TFRecordWriter(__lowerCamelCase ) as out_file: for i in range(len(__lowerCamelCase ) ): SCREAMING_SNAKE_CASE_ : Dict = serialized_examples[i] out_file.write(__lowerCamelCase ) print('''Wrote file {} containing {} records'''.format(__lowerCamelCase , __lowerCamelCase ) ) shard_count += 1 total_records += records_containing with open(f'split-{args.split}-records-count.txt' , '''w''' ) as f: print(f'Total {args.split} records: {total_records}' , file=__lowerCamelCase ) if __name__ == "__main__": UpperCAmelCase_ : Union[str, Any] = parse_args() main(args)
91
# Lazy-import module definition for MT5.
# NOTE(review): in the original block every assignment target was mangled to
# the same placeholder name, so `_import_structure` (referenced by _LazyModule)
# was never defined, the per-backend model lists overwrote each other instead
# of extending the structure, and the `sys.modules[__name__] = ...` swap was
# lost.  Restored to the standard transformers lazy-module layout.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# MT5 reuses the T5 tokenizers; fall back to dummy objects when the optional
# tokenization backends are missing.
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = [
        "FlaxMT5EncoderModel",
        "FlaxMT5ForConditionalGeneration",
        "FlaxMT5Model",
    ]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    # Replace this module with a lazy proxy; the tokenizers are eagerly bound
    # above and passed through as extra objects.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
59
0
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class A__ ( unittest.TestCase): def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() __lowerCAmelCase : Tuple = BlipImageProcessor() __lowerCAmelCase : Optional[int] = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' ) __lowerCAmelCase : Dict = BlipaProcessor(snake_case__ , snake_case__ ) processor.save_pretrained(self.tmpdirname ) def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ): return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).tokenizer def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ): return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).image_processor def __lowerCamelCase ( self ): shutil.rmtree(self.tmpdirname ) def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCAmelCase : Tuple = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase : Dict = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __lowerCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 ) __lowerCAmelCase : Dict = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , 
tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case__ ) def __lowerCamelCase ( self ): __lowerCAmelCase : List[str] = self.get_image_processor() __lowerCAmelCase : str = self.get_tokenizer() __lowerCAmelCase : Optional[Any] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) __lowerCAmelCase : Optional[Any] = self.prepare_image_inputs() __lowerCAmelCase : Optional[int] = image_processor(snake_case__ , return_tensors='np' ) __lowerCAmelCase : List[Any] = processor(images=snake_case__ , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __lowerCamelCase ( self ): __lowerCAmelCase : int = self.get_image_processor() __lowerCAmelCase : Tuple = self.get_tokenizer() __lowerCAmelCase : List[str] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) __lowerCAmelCase : Union[str, Any] = "lower newer" __lowerCAmelCase : int = processor(text=snake_case__ ) __lowerCAmelCase : List[str] = tokenizer(snake_case__ , return_token_type_ids=snake_case__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowerCamelCase ( self ): __lowerCAmelCase : int = self.get_image_processor() __lowerCAmelCase : List[Any] = self.get_tokenizer() __lowerCAmelCase : List[str] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) __lowerCAmelCase : int = "lower newer" __lowerCAmelCase : str = self.prepare_image_inputs() __lowerCAmelCase : Union[str, Any] = processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] ) # test if it raises when no input is passed with pytest.raises(snake_case__ ): 
processor() def __lowerCamelCase ( self ): __lowerCAmelCase : Any = self.get_image_processor() __lowerCAmelCase : List[str] = self.get_tokenizer() __lowerCAmelCase : Tuple = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) __lowerCAmelCase : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCAmelCase : Dict = processor.batch_decode(snake_case__ ) __lowerCAmelCase : str = tokenizer.batch_decode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) def __lowerCamelCase ( self ): __lowerCAmelCase : int = self.get_image_processor() __lowerCAmelCase : int = self.get_tokenizer() __lowerCAmelCase : Optional[int] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) __lowerCAmelCase : List[str] = "lower newer" __lowerCAmelCase : int = self.prepare_image_inputs() __lowerCAmelCase : Dict = processor(text=snake_case__ , images=snake_case__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
86
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


logger = logging.get_logger(__name__)

# Maps ONNX tensor element-type strings to their numpy equivalents.
# NOTE: the previous mapping referenced non-existent attributes such as
# ``np.inta`` / ``np.intaa`` / ``np.floataa``, which raised AttributeError at
# import time; restored to the real numpy dtypes.
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class UpperCAmelCase:
    """Thin wrapper exposing an ``onnxruntime.InferenceSession`` through the
    familiar ``from_pretrained`` / ``save_pretrained`` Hub interface.

    Attributes:
        model: the wrapped ``ort.InferenceSession`` (or ``None``).
        model_save_dir: directory the session's weights were loaded from.
        latest_model_name: filename of the loaded ``.onnx`` graph.
    """

    def __init__(self, model=None, **kwargs):
        """Wrap an already-created inference session.

        Args:
            model: an ``ort.InferenceSession`` instance (or ``None``).
            **kwargs: may carry ``model_save_dir`` and ``latest_model_name``,
                which are needed later by :meth:`_save_pretrained`.
        """
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", None)

    def __call__(self, **kwargs):
        """Run the session; every keyword argument becomes a named input tensor."""
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        # First argument None asks onnxruntime for all graph outputs.
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Create an ``ort.InferenceSession`` for the graph at *path*.

        Args:
            path: path to the ``.onnx`` file.
            provider: onnxruntime execution provider name; defaults to CPU.
            sess_options: optional ``ort.SessionOptions``.
        """
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        """Copy the loaded weights (and any external-data file) into *save_directory*."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            # source and destination already coincide; nothing to do
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        """Public save entry point: validate the target and delegate to
        :meth:`_save_pretrained`."""
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        """Load from a local directory or download from the Hub.

        Was calling the undefined name ``OnnxRuntimeModel.load_model``; the
        class itself owns ``load_model``, so ``cls.load_model`` is used.
        """
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = cls.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = cls.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        """Resolve an optional ``repo@revision`` spec and delegate to
        :meth:`_from_pretrained`."""
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
59
0
"""Tests for ``datasets.Dataset.from_list``."""
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class __A(TestCase):
    # NOTE: the base class was the undefined name ``A_``; the ``assert*``
    # helpers used below only exist on ``unittest.TestCase``, which is already
    # imported — restored. The methods also all shared one name, which hid the
    # ``_create_example_records`` helper that ``test_create`` calls; each method
    # now has a distinct (and, for tests, discoverable ``test_``-prefixed) name.

    def _create_example_records(self):
        # Four records with consistent columns, used by several tests.
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        # Column-oriented equivalent of the records above.
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
1
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf from huggingface_hub import cached_download, hf_hub_url from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase = logging.get_logger() @dataclass class UpperCAmelCase : A__ : nn.Module A__ : List[nn.Module] = field(default_factory=A_ ) A__ : list = field(default_factory=A_ ) def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Tensor , snake_case__ : Tensor ) -> Optional[Any]: '''simple docstring''' snake_case : List[str] = len(list(m.modules() ) ) == 1 or isinstance(snake_case__ , nn.Convad ) or isinstance(snake_case__ , nn.BatchNormad ) if has_not_submodules: self.traced.append(snake_case__ ) def __call__(self : List[Any] , snake_case__ : Tensor ) -> List[Any]: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(snake_case__ ) [x.remove() for x in self.handles] return self @property def _SCREAMING_SNAKE_CASE (self : int ) -> Optional[int]: '''simple docstring''' return list(filter(lambda snake_case__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class UpperCAmelCase : A__ : nn.Module A__ : nn.Module A__ : int = 1 A__ : List = field(default_factory=A_ ) A__ : List = field(default_factory=A_ ) A__ : bool = True def __call__(self : List[Any] , snake_case__ : Tensor ) -> Any: '''simple docstring''' snake_case : str = Tracker(self.dest )(snake_case__ ).parametrized snake_case : Optional[int] = 
Tracker(self.src )(snake_case__ ).parametrized snake_case : List[str] = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.src_skip , snake_case__ ) ) snake_case : Optional[Any] = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.dest_skip , snake_case__ ) ) if len(snake_case__ ) != len(snake_case__ ) and self.raise_if_mismatch: raise Exception( f"""Numbers of operations are different. Source module has {len(snake_case__ )} operations while""" f""" destination module has {len(snake_case__ )}.""" ) for dest_m, src_m in zip(snake_case__ , snake_case__ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f"""Transfered from={src_m} to={dest_m}""" ) class UpperCAmelCase ( nn.Module ): def __init__(self : Tuple , snake_case__ : nn.Module ) -> Optional[Any]: '''simple docstring''' super().__init__() snake_case : List[Tuple[str, nn.Module]] = [] # - get the stem feature_blocks.append(("conv1", model.stem) ) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith("block" ), f"""Unexpected layer name {k}""" snake_case : Union[str, Any] = len(snake_case__ ) + 1 feature_blocks.append((f"""res{block_index}""", v) ) snake_case : Optional[Any] = nn.ModuleDict(snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Tensor ) -> Dict: '''simple docstring''' return get_trunk_forward_outputs( snake_case__ , out_feat_keys=snake_case__ , feature_blocks=self._feature_blocks , ) class UpperCAmelCase ( A_ ): def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : str ) -> str: '''simple docstring''' snake_case : List[Any] = x.split("-" ) return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] ) def __getitem__(self : Optional[int] , snake_case__ : str ) -> Callable[[], Tuple[nn.Module, Dict]]: '''simple docstring''' if x not in self: snake_case : Dict = self.convert_name_to_timm(snake_case__ ) snake_case : Union[str, Any] = partial(lambda: (timm.create_model(snake_case__ , 
pretrained=snake_case__ ).eval(), None) ) else: snake_case : List[str] = super().__getitem__(snake_case__ ) return val class UpperCAmelCase ( A_ ): def __getitem__(self : Dict , snake_case__ : str ) -> Callable[[], nn.Module]: '''simple docstring''' if "seer" in x and "in1k" not in x: snake_case : str = RegNetModel else: snake_case : Optional[Any] = RegNetForImageClassification return val def UpperCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Tuple[str, str]] ): for from_key, to_key in keys: snake_case : str = from_state_dict[from_key].clone() print(f"""Copied key={from_key} to={to_key}""" ) return to_state_dict def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : Callable[[], nn.Module] , __lowerCamelCase : Callable[[], nn.Module] , __lowerCamelCase : RegNetConfig , __lowerCamelCase : Path , __lowerCamelCase : bool = True , ): print(f"""Converting {name}...""" ) with torch.no_grad(): snake_case , snake_case : int = from_model_func() snake_case : str = our_model_func(__lowerCamelCase ).eval() snake_case : int = ModuleTransfer(src=__lowerCamelCase , dest=__lowerCamelCase , raise_if_mismatch=__lowerCamelCase ) snake_case : Dict = torch.randn((1, 3, 224, 224) ) module_transfer(__lowerCamelCase ) if from_state_dict is not None: snake_case : str = [] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: snake_case : Tuple = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")] snake_case : Optional[Any] = manually_copy_vissl_head(__lowerCamelCase , our_model.state_dict() , __lowerCamelCase ) our_model.load_state_dict(__lowerCamelCase ) snake_case : Any = our_model(__lowerCamelCase , output_hidden_states=__lowerCamelCase ) snake_case : Union[str, Any] = ( our_outputs.logits if isinstance(__lowerCamelCase , __lowerCamelCase ) else our_outputs.last_hidden_state ) snake_case : Union[str, Any] = from_model(__lowerCamelCase ) 
snake_case : Dict = from_output[-1] if type(__lowerCamelCase ) is list else from_output # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state if "seer" in name and "in1k" in name: snake_case : Any = our_outputs.hidden_states[-1] assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), "The model logits don't match the original one." if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=__lowerCamelCase , ) snake_case : List[str] = 224 if "seer" not in name else 384 # we can use the convnext one snake_case : int = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=__lowerCamelCase ) image_processor.push_to_hub( repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=__lowerCamelCase , ) print(f"""Pushed {name}""" ) def UpperCamelCase ( __lowerCamelCase : Path , __lowerCamelCase : str = None , __lowerCamelCase : bool = True ): snake_case : Union[str, Any] = "imagenet-1k-id2label.json" snake_case : List[str] = 1000 snake_case : List[str] = (1, num_labels) snake_case : Any = "huggingface/label-files" snake_case : List[str] = num_labels snake_case : Optional[Any] = json.load(open(cached_download(hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type="dataset" ) ) , "r" ) ) snake_case : List[Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()} snake_case : str = idalabel snake_case : List[Any] = {v: k for k, v in idalabel.items()} snake_case : Dict = partial(__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase ) snake_case : Optional[Any] = { "regnet-x-002": ImageNetPreTrainedConfig( depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ), "regnet-x-004": ImageNetPreTrainedConfig( depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , 
layer_type="x" ), "regnet-x-006": ImageNetPreTrainedConfig( depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ), "regnet-x-008": ImageNetPreTrainedConfig( depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ), "regnet-x-016": ImageNetPreTrainedConfig( depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ), "regnet-x-032": ImageNetPreTrainedConfig( depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ), "regnet-x-040": ImageNetPreTrainedConfig( depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ), "regnet-x-064": ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ), "regnet-x-080": ImageNetPreTrainedConfig( depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ), "regnet-x-120": ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ), "regnet-x-160": ImageNetPreTrainedConfig( depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ), "regnet-x-320": ImageNetPreTrainedConfig( depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ), # y variant "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ), "regnet-y-004": ImageNetPreTrainedConfig( depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ), "regnet-y-006": ImageNetPreTrainedConfig( depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ), "regnet-y-008": ImageNetPreTrainedConfig( depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ), "regnet-y-016": ImageNetPreTrainedConfig( depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ), 
"regnet-y-032": ImageNetPreTrainedConfig( depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ), "regnet-y-040": ImageNetPreTrainedConfig( depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ), "regnet-y-064": ImageNetPreTrainedConfig( depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ), "regnet-y-080": ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ), "regnet-y-120": ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ), "regnet-y-160": ImageNetPreTrainedConfig( depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ), "regnet-y-320": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ), "regnet-y-1280-seer": RegNetConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ), "regnet-y-2560-seer": RegNetConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ), "regnet-y-10b-seer": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ), # finetuned on imagenet "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ), "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ), "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig( depths=[3, 
7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ), "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ), } snake_case : Union[str, Any] = NameToOurModelFuncMap() snake_case : str = NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(__lowerCamelCase : str , __lowerCamelCase : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]: snake_case : List[Any] = torch.hub.load_state_dict_from_url(__lowerCamelCase , model_dir=str(__lowerCamelCase ) , map_location="cpu" ) snake_case : Dict = model_func() # check if we have a head, if yes add it snake_case : str = files["classy_state_dict"]["base_model"]["model"] snake_case : Dict = model_state_dict["trunk"] model.load_state_dict(__lowerCamelCase ) return model.eval(), model_state_dict["heads"] # pretrained snake_case : List[Any] = partial( __lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) snake_case : Optional[int] = partial( __lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) snake_case : List[str] = partial( __lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) snake_case : Tuple = partial( __lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=620.83 , w_m=2.52 ) ) ) , ) # IN1K finetuned snake_case : List[Any] = partial( __lowerCamelCase , 
"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) snake_case : Tuple = partial( __lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) snake_case : str = partial( __lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) snake_case : Dict = partial( __lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=620.83 , w_m=2.52 ) ) ) , ) if model_name: convert_weight_and_push( __lowerCamelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , __lowerCamelCase , __lowerCamelCase , ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( __lowerCamelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) return config, expected_shape if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported regnet* architecture,""" """ currently: regnetx-*, regnety-*. 
If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) __lowerCamelCase = parser.parse_args() __lowerCamelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
59
0
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase

logger = logging.get_logger(__name__)

# NOTE: this map and the logger above were previously both bound to ``__A``,
# so the logger was immediately clobbered; the logger has been renamed.
__A = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class _SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for a Longformer model.

    The original ``__init__`` declared every parameter with the same name
    (``lowercase``), which is a SyntaxError; the real parameter names were
    reconstructed from the attribute assignments in the body.
    """

    # ``model_type`` is what PretrainedConfig subclasses use to register the
    # architecture name.
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


# NOTE(review): this class deliberately keeps the same name as the config
# class above (as in the original source), so it shadows it at module level —
# confirm which name downstream code imports before renaming either.
class _SCREAMING_SNAKE_CASE(OnnxConfig):
    """ONNX export configuration for Longformer."""

    def __init__(
        self,
        config: "PretrainedConfig",
        task: str = "default",
        patching_specs: "Optional[List[PatchingSpec]]" = None,
    ):
        super().__init__(config, task, patching_specs)
        # The modeling code checks this flag to take ONNX-exportable paths.
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        # Looser tolerance than the OnnxConfig default.
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # Longformer needs at least opset 14.
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        global_attention_mask = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        global_attention_mask[:, ::2] = 1
        inputs["global_attention_mask"] = global_attention_mask

        return inputs
19
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def UpperCamelCase ( __lowerCamelCase : List[Any] ): return 1.0 / (1.0 + np.exp(-_outputs )) def UpperCamelCase ( __lowerCamelCase : int ): snake_case : Tuple = np.max(_outputs , axis=-1 , keepdims=__lowerCamelCase ) snake_case : int = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__lowerCamelCase ) class UpperCAmelCase ( A_ ): A__ : Any = "sigmoid" A__ : str = "softmax" A__ : int = "none" @add_end_docstrings( A_ ,r"\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. 
If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n " ,) class UpperCAmelCase ( A_ ): A__ : int = False A__ : Union[str, Any] = ClassificationFunction.NONE def __init__(self : List[str] , **snake_case__ : int ) -> str: '''simple docstring''' super().__init__(**snake_case__ ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : List[str]=None , snake_case__ : Optional[Any]=None , snake_case__ : Union[str, Any]="" , **snake_case__ : List[str] ) -> Union[str, Any]: '''simple docstring''' snake_case : Dict = tokenizer_kwargs snake_case : List[Any] = {} if hasattr(self.model.config , "return_all_scores" ) and return_all_scores is None: snake_case : Optional[int] = self.model.config.return_all_scores if isinstance(snake_case__ , snake_case__ ) or top_k is None: snake_case : List[Any] = top_k snake_case : str = False elif return_all_scores is not None: warnings.warn( "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of" " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." 
, snake_case__ , ) if return_all_scores: snake_case : List[str] = None else: snake_case : Optional[int] = 1 if isinstance(snake_case__ , snake_case__ ): snake_case : Dict = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: snake_case : Optional[int] = function_to_apply return preprocess_params, {}, postprocess_params def __call__(self : Dict , *snake_case__ : List[str] , **snake_case__ : int ) -> Optional[int]: '''simple docstring''' snake_case : Optional[int] = super().__call__(*snake_case__ , **snake_case__ ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. snake_case : Tuple = "top_k" not in kwargs if isinstance(args[0] , snake_case__ ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Tuple , **snake_case__ : Union[str, Any] ) -> Dict[str, GenericTensor]: '''simple docstring''' snake_case : int = self.framework if isinstance(snake_case__ , snake_case__ ): return self.tokenizer(**snake_case__ , return_tensors=snake_case__ , **snake_case__ ) elif isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) == 1 and isinstance(inputs[0] , snake_case__ ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=snake_case__ , **snake_case__ ) elif isinstance(snake_case__ , snake_case__ ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a" " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." 
) return self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : Union[str, Any] ) -> int: '''simple docstring''' return self.model(**snake_case__ ) def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : Optional[Any] , snake_case__ : List[str]=None , snake_case__ : Dict=1 , snake_case__ : Tuple=True ) -> str: '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: snake_case : Tuple = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: snake_case : Tuple = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , "function_to_apply" ) and function_to_apply is None: snake_case : Tuple = self.model.config.function_to_apply else: snake_case : int = ClassificationFunction.NONE snake_case : Any = model_outputs["logits"][0] snake_case : List[str] = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: snake_case : Optional[Any] = sigmoid(snake_case__ ) elif function_to_apply == ClassificationFunction.SOFTMAX: snake_case : Union[str, Any] = softmax(snake_case__ ) elif function_to_apply == ClassificationFunction.NONE: snake_case : Optional[Any] = outputs else: raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""" ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} snake_case : Optional[int] = [ {"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(snake_case__ ) ] if not _legacy: dict_scores.sort(key=lambda snake_case__ : x["score"] , reverse=snake_case__ ) if top_k is not None: snake_case : Optional[int] = dict_scores[:top_k] return dict_scores
59
0
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: SCREAMING_SNAKE_CASE :int = None SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :str = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} SCREAMING_SNAKE_CASE :List[Any] = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } SCREAMING_SNAKE_CASE :Optional[Any] = { 'facebook/nllb-large-en-ro': 1024, 'facebook/nllb-200-distilled-600M': 1024, } # fmt: off SCREAMING_SNAKE_CASE :str = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 
'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class UpperCAmelCase ( A_ ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = ["input_ids", "attention_mask"] snake_case_ = NllbTokenizer snake_case_ = [] snake_case_ = [] def __init__( self : Union[str, Any] ,A : Union[str, Any]=None ,A : int=None ,A : Any="<s>" ,A : List[Any]="</s>" ,A : List[str]="</s>" ,A : Optional[int]="<s>" ,A : str="<unk>" ,A : List[Any]="<pad>" 
,A : int="<mask>" ,A : int=None ,A : Tuple=None ,A : str=None ,A : List[str]=False ,**A : Optional[int] ,): __A = AddedToken(snake_case__ ,lstrip=snake_case__ ,rstrip=snake_case__ ) if isinstance(snake_case__ ,snake_case__ ) else mask_token __A = legacy_behaviour super().__init__( vocab_file=snake_case__ ,tokenizer_file=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,sep_token=snake_case__ ,cls_token=snake_case__ ,unk_token=snake_case__ ,pad_token=snake_case__ ,mask_token=snake_case__ ,src_lang=snake_case__ ,tgt_lang=snake_case__ ,additional_special_tokens=snake_case__ ,legacy_behaviour=snake_case__ ,**snake_case__ ,) __A = vocab_file __A = False if not self.vocab_file else True __A = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) __A = { lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __A = src_lang if src_lang is not None else "eng_Latn" __A = self.convert_tokens_to_ids(self._src_lang ) __A = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def UpperCamelCase_ ( self : List[Any] ): return self._src_lang @src_lang.setter def UpperCamelCase_ ( self : int ,A : str ): __A = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def UpperCamelCase_ ( self : List[str] ,A : List[int] ,A : Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCamelCase_ ( self : Optional[Any] ,A : List[int] ,A : Optional[List[int]] = None ): __A = [self.sep_token_id] __A = [self.cls_token_id] if 
token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCamelCase_ ( self : Optional[int] ,A : Union[str, Any] ,A : str ,A : Optional[str] ,A : Optional[str] ,**A : str ): if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) __A = src_lang __A = self(snake_case__ ,add_special_tokens=snake_case__ ,return_tensors=snake_case__ ,**snake_case__ ) __A = self.convert_tokens_to_ids(snake_case__ ) __A = tgt_lang_id return inputs def UpperCamelCase_ ( self : Union[str, Any] ,A : List[str] ,A : str = "eng_Latn" ,A : Optional[List[str]] = None ,A : str = "fra_Latn" ,**A : List[str] ,): __A = src_lang __A = tgt_lang return super().prepare_seqaseq_batch(snake_case__ ,snake_case__ ,**snake_case__ ) def UpperCamelCase_ ( self : Dict ): return self.set_src_lang_special_tokens(self.src_lang ) def UpperCamelCase_ ( self : str ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCamelCase_ ( self : Optional[int] ,A : List[Any] ): __A = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: __A = [] __A = [self.eos_token_id, self.cur_lang_code] else: __A = [self.cur_lang_code] __A = [self.eos_token_id] __A = self.convert_ids_to_tokens(self.prefix_tokens ) __A = self.convert_ids_to_tokens(self.suffix_tokens ) __A = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str ,pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,) def UpperCamelCase_ ( self : int ,A : str ): __A = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: __A = [] __A = [self.eos_token_id, self.cur_lang_code] else: __A = [self.cur_lang_code] __A = [self.eos_token_id] __A = self.convert_ids_to_tokens(self.prefix_tokens ) __A = self.convert_ids_to_tokens(self.suffix_tokens ) 
__A = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str ,pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,) def UpperCamelCase_ ( self : int ,A : str ,A : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(snake_case__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' ) return __A = os.path.join( snake_case__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file ,snake_case__ ) return (out_vocab_file,)
15
from __future__ import annotations

# A sudoku board is a 9x9 grid of ints; 0 marks an empty cell.
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` may be placed at ``grid[row][column]``.

    Checks the row, the column, and the 3x3 box containing the cell.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    # Top-left corner of the 3x3 box is (row - row % 3, column - column % 3).
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the (row, column) of the first empty cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place by backtracking.

    Returns the (mutated) solved grid, or None if no solution exists.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # Backtrack: undo the tentative placement before trying the next digit.
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid one row per line, cells separated by spaces."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
59
0
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging UpperCAmelCase : str = logging.get_logger(__name__) class lowerCamelCase__ ( A_ ): """simple docstring""" __a = ["input_features"] def __init__( self : List[str] , UpperCamelCase : int=80 , UpperCamelCase : List[Any]=16_000 , UpperCamelCase : List[Any]=160 , UpperCamelCase : Tuple=30 , UpperCamelCase : Dict=400 , UpperCamelCase : str=0.0 , UpperCamelCase : Optional[int]=False , **UpperCamelCase : Dict , ): '''simple docstring''' super().__init__( feature_size=snake_case__ , sampling_rate=snake_case__ , padding_value=snake_case__ , return_attention_mask=snake_case__ , **snake_case__ , ) __UpperCAmelCase : Dict = n_fft __UpperCAmelCase : List[Any] = hop_length __UpperCAmelCase : List[Any] = chunk_length __UpperCAmelCase : List[str] = chunk_length * sampling_rate __UpperCAmelCase : int = self.n_samples // hop_length __UpperCAmelCase : List[Any] = sampling_rate __UpperCAmelCase : List[str] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=snake_case__ , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=snake_case__ , norm="""slaney""" , mel_scale="""slaney""" , ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : np.array ): '''simple docstring''' __UpperCAmelCase : Any = spectrogram( snake_case__ , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , ) __UpperCAmelCase : str = log_spec[:, :-1] __UpperCAmelCase : Any = np.maximum(snake_case__ , log_spec.max() - 8.0 ) __UpperCAmelCase : List[str] = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from 
transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def lowerCamelCase__ ( UpperCamelCase : List[np.ndarray] , UpperCamelCase : List[np.ndarray] , UpperCamelCase : float = 0.0 ): '''simple docstring''' if attention_mask is not None: __UpperCAmelCase : Any = np.array(snake_case__ , np.intaa ) __UpperCAmelCase : Dict = [] for vector, length in zip(snake_case__ , attention_mask.sum(-1 ) ): __UpperCAmelCase : Dict = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: __UpperCAmelCase : Union[str, Any] = padding_value normed_input_values.append(snake_case__ ) else: __UpperCAmelCase : List[str] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def __call__( self : str , UpperCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase : bool = True , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[str] = "max_length" , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[bool] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) __UpperCAmelCase : Optional[int] = isinstance(snake_case__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) __UpperCAmelCase : List[Any] = is_batched_numpy or ( isinstance(snake_case__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __UpperCAmelCase : str = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(snake_case__ , np.ndarray ): __UpperCAmelCase : List[str] = np.asarray(snake_case__ , dtype=np.floataa ) elif isinstance(snake_case__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __UpperCAmelCase : List[str] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __UpperCAmelCase : Dict = [np.asarray([raw_speech] ).T] __UpperCAmelCase : Optional[int] = BatchFeature({"""input_features""": raw_speech} ) # convert into correct format for padding __UpperCAmelCase : Optional[Any] = self.pad( snake_case__ , padding=snake_case__ , max_length=max_length if max_length else self.n_samples , truncation=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: __UpperCAmelCase : Optional[Any] = self.zero_mean_unit_var_norm( padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , ) __UpperCAmelCase : Union[str, Any] = np.stack(padded_inputs["""input_features"""] , axis=0 ) # make sure list is in array format __UpperCAmelCase : List[Any] = padded_inputs.get("""input_features""" ).transpose(2 , 0 , 1 ) __UpperCAmelCase : str = [self._np_extract_fbank_features(snake_case__ ) for waveform in input_features[0]] if isinstance(input_features[0] , 
snake_case__ ): __UpperCAmelCase : Dict = [np.asarray(snake_case__ , dtype=np.floataa ) for feature in input_features] else: __UpperCAmelCase : Tuple = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) __UpperCAmelCase : str = padded_inputs["attention_mask"][:, :: self.hop_length] if return_tensors is not None: __UpperCAmelCase : int = padded_inputs.convert_to_tensors(snake_case__ ) return padded_inputs def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ ) __UpperCAmelCase : Dict = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
115
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="""%(message)s""") def UpperCamelCase ( __lowerCamelCase : np.ndarray ): return input_array.reshape((input_array.size, 1) ) def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : int ): snake_case : Any = np.nan for i in range(__lowerCamelCase ): snake_case : List[str] = features[:, labels == i] snake_case : Dict = data.mean(1 ) # Centralize the data of class i snake_case : Optional[Any] = data - column_reshape(__lowerCamelCase ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(__lowerCamelCase , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) snake_case : Optional[Any] = np.dot(__lowerCamelCase , centered_data.T ) return covariance_sum / features.shape[1] def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : int ): snake_case : Optional[Any] = features.mean(1 ) snake_case : Tuple = np.nan for i in range(__lowerCamelCase ): snake_case : Tuple = features[:, labels == i] snake_case : Tuple = data.shape[1] snake_case : List[str] = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase ) , (column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase )).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) snake_case : Optional[int] = device_data * np.dot( column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase ) , (column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase )).T , ) return covariance_sum / features.shape[1] def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : int ): # Check if the features have been loaded if features.any(): snake_case : Tuple = features.mean(1 ) # Center the dataset snake_case : List[str] = features - np.reshape(__lowerCamelCase , (data_mean.size, 1) ) snake_case : Optional[Any] = np.dot(__lowerCamelCase , centered_data.T ) / features.shape[1] snake_case , snake_case : Dict = np.linalg.eigh(__lowerCamelCase ) # Take all the columns in the reverse order (-1), and then takes only the first snake_case : Optional[Any] = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space snake_case : Union[str, Any] = np.dot(filtered_eigenvectors.T , __lowerCamelCase ) logging.info("Principal Component Analysis computed" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=__lowerCamelCase ) logging.error("Dataset empty" ) raise AssertionError def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : int , __lowerCamelCase : int ): assert classes > dimensions # Check if features have been already loaded if features.any: snake_case , snake_case : str = eigh( covariance_between_classes(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , covariance_within_classes(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , ) snake_case : str = eigenvectors[:, ::-1][:, :dimensions] snake_case , snake_case , snake_case : int = np.linalg.svd(__lowerCamelCase ) snake_case : List[Any] = svd_matrix[:, 0:dimensions] snake_case : Optional[Any] = np.dot(filtered_svd_matrix.T , __lowerCamelCase ) logging.info("Linear Discriminant Analysis computed" ) return projected_data else: 
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=__lowerCamelCase ) logging.error("Dataset empty" ) raise AssertionError def UpperCamelCase ( ): # Create dummy dataset with 2 classes and 3 features snake_case : str = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) snake_case : Union[str, Any] = np.array([0, 0, 0, 1, 1] ) snake_case : List[Any] = 2 snake_case : Any = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(__lowerCamelCase ) as error_info: snake_case : str = linear_discriminant_analysis( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if isinstance(__lowerCamelCase , np.ndarray ): raise AssertionError( "Did not raise AssertionError for dimensions > classes" ) assert error_info.type is AssertionError def UpperCamelCase ( ): snake_case : List[str] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) snake_case : List[str] = 2 snake_case : int = np.array([[6.9282_0323, 8.6602_5404, 10.3923_0485], [3.0, 3.0, 3.0]] ) with pytest.raises(__lowerCamelCase ) as error_info: snake_case : Union[str, Any] = principal_component_analysis(__lowerCamelCase , __lowerCamelCase ) if not np.allclose(__lowerCamelCase , __lowerCamelCase ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
59
0
'''simple docstring''' import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy __lowerCAmelCase : List[Any] =logging.getLogger(__name__) __lowerCAmelCase : List[str] ="pytorch_model.bin" @dataclasses.dataclass class UpperCAmelCase : __lowercase = dataclasses.field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""} ) __lowercase = dataclasses.field( default=A_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""} , ) @dataclasses.dataclass class UpperCAmelCase : __lowercase = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""} ) __lowercase = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""} ) __lowercase = dataclasses.field( default=A_ , metadata={"""help""": """A csv or a json file containing the validation data."""} ) __lowercase = dataclasses.field( default=A_ , metadata={"""help""": """The name of the task to train on."""} , ) __lowercase = dataclasses.field( default=A_ , metadata={"""help""": """The list of labels for the task."""} ) @dataclasses.dataclass class UpperCAmelCase : __lowercase = dataclasses.field( metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""} ) __lowercase = dataclasses.field( default="""accuracy""" , metadata={"""help""": """The evaluation metric used for the task."""} ) __lowercase = dataclasses.field( default="""no""" , metadata={ """help""": """The evaluation strategy to adopt during training. 
Possible values are: [\"no\", \"step\", \"epoch]""" } , ) __lowercase = dataclasses.field( default=10 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , ) __lowercase = dataclasses.field( default=0.0 , metadata={ """help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions.""" } , ) __lowercase = dataclasses.field( default=A_ , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""} , ) __lowercase = dataclasses.field( default=A_ , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""} , ) __lowercase = dataclasses.field( default=A_ , metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""} , ) __lowercase = dataclasses.field( default=0.0 , metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""} , ) __lowercase = dataclasses.field( default=100 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , ) __lowercase = dataclasses.field( default=A_ , metadata={"""help""": """Random seed for initialization."""} , ) def UpperCamelCase ( _lowerCamelCase : int , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] ): A__ = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 ) if args.do_filter_by_confidence: A__ = dataset.filter(lambda _lowerCamelCase : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 A__ = int(eval_result * len(__lowerCamelCase ) ) print(__lowerCamelCase ) A__ = dataset.sort("probability" , reverse=__lowerCamelCase ) A__ = dataset.select(range(__lowerCamelCase ) ) A__ = dataset.remove_columns(["label", "probability"] ) A__ = 
dataset.rename_column("prediction" , "label" ) A__ = dataset.map(lambda _lowerCamelCase : {"label": idalabel[example["label"]]} ) A__ = dataset.shuffle(seed=args.seed ) A__ = os.path.join(__lowerCamelCase , F"train_pseudo.{args.data_file_extension}" ) if args.data_file_extension == "csv": dataset.to_csv(__lowerCamelCase , index=__lowerCamelCase ) else: dataset.to_json(__lowerCamelCase ) def UpperCamelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple , **_lowerCamelCase : List[Any] ): A__ = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() A__ = STModelArguments(model_name_or_path=__lowerCamelCase ) A__ = STDataArguments(train_file=__lowerCamelCase , infer_file=__lowerCamelCase ) A__ = STTrainingArguments(output_dir=__lowerCamelCase ) A__ = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(__lowerCamelCase ).items(): setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) for key, value in kwargs.items(): if hasattr(__lowerCamelCase , __lowerCamelCase ): setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Sanity checks A__ = {} A__ = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert 
args.infer_file is not None A__ = args.train_file A__ = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None A__ = args.eval_file for key in data_files: A__ = data_files[key].split("." )[-1] assert extension in ["csv", "json"], F"`{key}_file` should be a csv or a json file." if args.data_file_extension is None: A__ = extension else: assert extension == args.data_file_extension, F"`{key}_file` should be a {args.data_file_extension} file`." assert ( args.eval_metric in datasets.list_metrics() ), F"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}." # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed ) logger.info("Creating the initial data directory for self-training..." ) A__ = F"{args.output_dir}/self-train_iter-{{}}".format A__ = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=__lowerCamelCase ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) accelerator.wait_for_everyone() A__ = None A__ = None A__ = 0 A__ = False # Show the progress bar A__ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations ) ): A__ = data_dir_format(__lowerCamelCase ) assert os.path.exists(__lowerCamelCase ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 A__ = os.path.join(__lowerCamelCase , "stage-1" ) A__ = { "accelerator": accelerator, "model_name_or_path": args.model_name_or_path, "cache_dir": args.cache_dir, "do_train": True, "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"], "do_eval": True if args.eval_file is not None else False, "eval_file": data_files["eval"], "do_predict": True, "infer_file": data_files["infer"], "task_name": args.task_name, "label_list": args.label_list, "output_dir": 
current_output_dir, "eval_metric": args.eval_metric, "evaluation_strategy": args.evaluation_strategy, "early_stopping_patience": args.early_stopping_patience, "early_stopping_threshold": args.early_stopping_threshold, "seed": args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(__lowerCamelCase , __lowerCamelCase ): arguments_dict.update({key: value} ) A__ = os.path.join(__lowerCamelCase , "best-checkpoint" , __lowerCamelCase ) if os.path.exists(__lowerCamelCase ): logger.info( "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , __lowerCamelCase , __lowerCamelCase , ) else: logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , __lowerCamelCase ) finetune(**__lowerCamelCase ) accelerator.wait_for_everyone() assert os.path.exists(__lowerCamelCase ) logger.info("Self-training job completed: iteration: %d, stage: 1." , __lowerCamelCase ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data A__ = os.path.join(__lowerCamelCase , "best-checkpoint" ) A__ = os.path.join(__lowerCamelCase , "stage-2" ) # Update arguments_dict A__ = model_path A__ = data_files["train"] A__ = current_output_dir A__ = os.path.join(__lowerCamelCase , "best-checkpoint" , __lowerCamelCase ) if os.path.exists(__lowerCamelCase ): logger.info( "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , __lowerCamelCase , __lowerCamelCase , ) else: logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , __lowerCamelCase ) finetune(**__lowerCamelCase ) accelerator.wait_for_everyone() assert os.path.exists(__lowerCamelCase ) logger.info("Self-training job completed: iteration: %d, stage: 2." 
, __lowerCamelCase ) A__ = iteration A__ = data_dir_format(iteration + 1 ) A__ = AutoConfig.from_pretrained(os.path.join(__lowerCamelCase , "best-checkpoint" ) ) A__ = config.idalabel A__ = os.path.join(__lowerCamelCase , "eval_results_best-checkpoint.json" ) A__ = os.path.join(__lowerCamelCase , "test_results_best-checkpoint.json" ) assert os.path.exists(__lowerCamelCase ) with open(__lowerCamelCase , "r" ) as f: A__ = float(json.load(__lowerCamelCase )[args.eval_metric] ) A__ = os.path.join(__lowerCamelCase , "infer_output_best-checkpoint.csv" ) assert os.path.exists(__lowerCamelCase ) # Loading the dataset from local csv or json files. A__ = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"] A__ = load_dataset("csv" , data_files={"data": infer_output_file} )["data"] if accelerator.is_main_process: os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) shutil.copy(__lowerCamelCase , os.path.join(__lowerCamelCase , F"eval_results_iter-{iteration}.json" ) ) if os.path.exists(__lowerCamelCase ): shutil.copy(__lowerCamelCase , os.path.join(__lowerCamelCase , F"test_results_iter-{iteration}.json" ) ) create_pseudo_labeled_data(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) accelerator.wait_for_everyone() A__ = os.path.join(__lowerCamelCase , F"train_pseudo.{args.data_file_extension}" ) if args.evaluation_strategy != IntervalStrategy.NO.value: A__ = eval_result if best_iteration is None: A__ = new_iteration A__ = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: A__ = new_iteration A__ = new_eval_result A__ = 0 else: if new_eval_result == best_eval_result: A__ = new_iteration A__ = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: A__ = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best 
iteration logger.info("Best iteration: %d" , __lowerCamelCase ) logger.info("Best evaluation result: %s = %f" , args.eval_metric , __lowerCamelCase ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(__lowerCamelCase , F"eval_results_iter-{iteration}.json" ) , os.path.join(__lowerCamelCase , "eval_results_best-iteration.json" ) , ) else: # Assume that the last iteration is the best logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 ) logger.info("Best evaluation result: %s = %f" , args.eval_metric , __lowerCamelCase ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(__lowerCamelCase , F"eval_results_iter-{args.max_selftrain_iterations - 1}.json" ) , os.path.join(__lowerCamelCase , "eval_results_best-iteration.json" ) , )
237
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the registry of already-emitted deprecation warnings.

    `datasets` deduplicates deprecation warnings per process; clearing the set
    guarantees every parametrized case re-emits its warning.
    """
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    """Stub out the huggingface_hub client so `list_metrics` works offline."""

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Each metric entry point must warn users to migrate to the `evaluate` library."""
    if "tmp_path" in args:
        # inspect_metric needs a real writable directory; substitute the fixture value.
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
59
0
import unittest

import numpy as np

from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.roberta.modeling_flax_roberta import (
        FlaxRobertaForCausalLM,
        FlaxRobertaForMaskedLM,
        FlaxRobertaForMultipleChoice,
        FlaxRobertaForQuestionAnswering,
        FlaxRobertaForSequenceClassification,
        FlaxRobertaForTokenClassification,
        FlaxRobertaModel,
    )


class FlaxRobertaModelTester(unittest.TestCase):
    """Builds tiny random RoBERTa configs and inputs for the Flax model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return a small config plus random input tensors for an encoder-only model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Same as above but flips the config into decoder mode and adds encoder tensors."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Smoke test: load PyTorch weights into each Flax head and run one token.
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
43
"""Self-training for sequence classification: iteratively pseudo-label unlabeled data and fine-tune."""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional

import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm

import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy


logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"


@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which model we fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and prediction."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Arguments controlling the self-training loop itself."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )


def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Join model predictions onto the unlabeled set and write next iteration's pseudo-labeled train file.

    Optionally filters rows by prediction confidence and/or keeps only the top
    `eval_result` fraction sorted by probability.
    """
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)


def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Run the self-training loop: train, predict, pseudo-label, repeat, with optional early stopping."""
    # Initialize the accelerator; it handles device placement for us.
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    # Merge the three argument groups plus any caller overrides into one namespace.
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    # Ties refresh the best iteration but still consume patience.
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
59
0
"""Testing suite for the PyTorch MobileNetV1 model."""

import inspect
import unittest

from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetVaForImageClassification, MobileNetVaModel
    from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetVaImageProcessor


class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        # MobileNetV1 configs expose TF-style padding and a depth multiplier.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetVaModelTester:
    """Builds tiny random MobileNetV1 configs/inputs for the model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        # The model scales its channel count by the depth multiplier.
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV1 does not use
    input_ids/inputs_embeds, attention masks or head masking.
    """

    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
238
# Lazy-import module for XGLM: only registers names; heavy backends load on first attribute access.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
59
0
from __future__ import annotations import collections import pprint from pathlib import Path def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> Union[str, Any]: return "".join(sorted(__lowerCamelCase ) ) def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> Any: return word_by_signature[signature(__lowerCamelCase )] lowerCamelCase__ : Dict = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8') lowerCamelCase__ : Dict = sorted({word.strip().lower() for word in data.splitlines()}) lowerCamelCase__ : Optional[Any] = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": lowerCamelCase__ : List[str] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open('anagrams.txt', 'w') as file: file.write('all_anagrams = \n ') file.write(pprint.pformat(all_anagrams))
225
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    """Configuration for a MegatronBERT model.

    The broken original inherited from an undefined ``A_`` and stored every
    hyper-parameter in a throwaway local instead of on ``self``, leaving the
    config object empty; restored to plain attribute assignments.
    """

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
59
0
"""simple docstring""" def a_ ( lowerCamelCase , lowerCamelCase ): def get_matched_characters(lowerCamelCase , lowerCamelCase ) -> str: UpperCAmelCase__ = [] UpperCAmelCase__ = min(len(_stra ) , len(_stra ) ) // 2 for i, l in enumerate(_stra ): UpperCAmelCase__ = int(max(0 , i - limit ) ) UpperCAmelCase__ = int(min(i + limit + 1 , len(_stra ) ) ) if l in _stra[left:right]: matched.append(__lowerCamelCase ) UpperCAmelCase__ = f'''{_stra[0:_stra.index(__lowerCamelCase )]} {_stra[_stra.index(__lowerCamelCase ) + 1:]}''' return "".join(__lowerCamelCase ) # matching characters UpperCAmelCase__ = get_matched_characters(__lowerCamelCase , __lowerCamelCase ) UpperCAmelCase__ = get_matched_characters(__lowerCamelCase , __lowerCamelCase ) UpperCAmelCase__ = len(__lowerCamelCase ) # transposition UpperCAmelCase__ = ( len([(ca, ca) for ca, ca in zip(__lowerCamelCase , __lowerCamelCase ) if ca != ca] ) // 2 ) if not match_count: UpperCAmelCase__ = 0.0 else: UpperCAmelCase__ = ( 1 / 3 * ( match_count / len(__lowerCamelCase ) + match_count / len(__lowerCamelCase ) + (match_count - transpositions) / match_count ) ) # common prefix up to 4 characters UpperCAmelCase__ = 0 for ca, ca in zip(stra[:4] , stra[:4] ): if ca == ca: prefix_len += 1 else: break return jaro + 0.1 * prefix_len * (1 - jaro) if __name__ == "__main__": import doctest doctest.testmod() print(jaro_winkler('hello', 'world'))
98
import gc
import unittest

from parameterized import parameterized

from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp


@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    """Slow integration tests comparing Flax UNet outputs (bfloat16) against
    reference slices produced by the PyTorch fp16 checkpoints.

    The broken original gave every method the same name (so only the last
    survived), used duplicate parameter names (a SyntaxError), and referenced
    nonexistent dtypes (``jnp.bfloataa``); restored with real names.
    """

    def get_file_format(self, seed, shape):
        # Reference fixtures are stored on the Hub as .npy blobs keyed by seed/shape.
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        # SD2 uses larger latents (96x96) and a 1024-dim text encoder.
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
59
0
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) class lowerCAmelCase__ ( A_ ): '''simple docstring''' __UpperCamelCase = ["pixel_values"] def __init__( self : str , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : int = 8 , **lowercase_ : str , ): '''simple docstring''' super().__init__(**snake_case__) SCREAMING_SNAKE_CASE_ : Optional[Any] = do_rescale SCREAMING_SNAKE_CASE_ : Union[str, Any] = rescale_factor SCREAMING_SNAKE_CASE_ : str = do_pad SCREAMING_SNAKE_CASE_ : Union[str, Any] = pad_size def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : np.ndarray , lowercase_ : float , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any]): '''simple docstring''' return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : int , lowercase_ : Optional[Union[str, ChannelDimension]] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = get_image_size(snake_case__) SCREAMING_SNAKE_CASE_ : List[Any] = (old_height // size + 1) * size - old_height SCREAMING_SNAKE_CASE_ : int = (old_width // size + 1) * size - old_width return pad(snake_case__ , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=snake_case__) def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : ImageInput , lowercase_ : Optional[bool] = None , lowercase_ : Optional[float] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[int] = None , lowercase_ : 
Optional[Union[str, TensorType]] = None , lowercase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowercase_ : Tuple , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE_ : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE_ : int = do_pad if do_pad is not None else self.do_pad SCREAMING_SNAKE_CASE_ : Tuple = pad_size if pad_size is not None else self.pad_size SCREAMING_SNAKE_CASE_ : Tuple = make_list_of_images(snake_case__) if not valid_images(snake_case__): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''') if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''') # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE_ : List[str] = [to_numpy_array(snake_case__) for image in images] if do_rescale: SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.rescale(image=snake_case__ , scale=snake_case__) for image in images] if do_pad: SCREAMING_SNAKE_CASE_ : List[str] = [self.pad(snake_case__ , size=snake_case__) for image in images] SCREAMING_SNAKE_CASE_ : Any = [to_channel_dimension_format(snake_case__ , snake_case__) for image in images] SCREAMING_SNAKE_CASE_ : Tuple = {"pixel_values": images} return BatchFeature(data=snake_case__ , tensor_type=snake_case__)
91
"""Evaluate a fine-tuned speech-recognition checkpoint (WER/CER) on a 🤗 dataset.

The broken original defined all three functions under the same name with
duplicate parameter names (a SyntaxError) and called an undefined ``main``;
restored with distinct, descriptive names.
"""
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER over *result* and write them (plus optional per-sample
    prediction/target logs) to text files named after the dataset."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Lower-case *text*, strip the characters ignored during training, and
    collapse newline/whitespace runs to single spaces."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio to the rate the model was trained with
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
59
0
"""simple docstring""" from math import sqrt def __lowerCAmelCase (_UpperCamelCase = 100_0000 ): __lowerCAmelCase : int = 0 __lowerCAmelCase : int = 0 __lowerCAmelCase : int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(__lowerCamelCase , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(f'{solution() = }')
86
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)


@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    """Output of `FlaxUNet2DConditionModel`.

    Attributes:
        sample: the predicted noise/sample of shape (batch, channels, height, width).
    """

    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    """Conditional 2D UNet (Flax) that predicts a denoised sample given a noisy
    input, a timestep, and encoder hidden states (e.g. text embeddings).

    The broken original declared every config field under one name ``A__``
    (so only the last survived) and inherited from an undefined ``A_``;
    fields restored to the names the method bodies actually read.
    """

    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        """Initialize parameters with zero-filled dummy inputs of the configured shapes."""
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        """Run the UNet; returns `FlaxUNet2DConditionOutput` (or a 1-tuple when
        `return_dict=False`).  `sample` is expected channels-first and is
        transposed to NHWC internally."""
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            # ControlNet-style residual injection on the skip connections.
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
59
0
"""Tokenization class for NLLB (sentencepiece-based)."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
# fmt: on


class NllbTokenizer(PreTrainedTokenizer):
    """Sentencepiece tokenizer for NLLB-200 with fairseq-style language codes.

    Sequences are built as ``[src_lang_code] X [eos]`` for source text (or
    ``[eos, src_lang_code]`` when ``legacy_behaviour`` is set).  The broken
    original named every method ``_lowercase`` (so only the last survived,
    and ``@src_lang.setter`` referenced an undefined property) and bound
    instance state to throwaway locals; restored with real names and
    ``self`` attributes.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)

        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        # The SentencePiece processor is not picklable; serialize its proto.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add the language-code prefix and EOS suffix around the sequence(s)."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """NLLB does not use token types; return an all-zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipelines, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.

        - In legacy mode: No prefix and suffix=[eos, src_lang_code].
        - In default mode: Prefix=[src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.

        - In legacy mode: No prefix and suffix=[eos, tgt_lang_code].
        - In default mode: Prefix=[tgt_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
1
# Conversion factors: joules per one unit of the named energy unit.
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert ``value`` from the energy unit ``from_type`` to ``to_type``.

    Both unit names must be keys of ``ENERGY_CONVERSION``.

    Raises:
        ValueError: if either unit name is unknown.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    # Convert to joules, then to the target unit.
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
59
0
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    """Heap index of the parent of the node stored at ``position``."""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """Heap index of the left child of the node stored at ``position``."""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """Heap index of the right child of the node stored at ``position``."""
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Binary min-heap of ``(element, weight)`` pairs.

    ``position_map`` tracks each element's index in ``heap`` so that
    ``update_key`` (decrease/increase weight) runs in O(log n).
    """

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        """Append ``elem`` with ``weight`` and restore the heap property upward."""
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        """Remove and return the element with the minimum weight."""
        # Move the root to the end so it can be popped in O(1).
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            # The swapped-in root may violate the heap property; sift it down.
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        """Assign a new ``weight`` to ``elem`` and re-heapify around it."""
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Sift ``elem`` up while it is lighter than its parent.
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(curr_pos, parent_position)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Sift ``elem`` down while it is heavier than its lightest child.
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, nodea_pos: int, nodeb_pos: int) -> None:
        # Swap two heap entries and keep position_map consistent.
        nodea_elem = self.heap[nodea_pos][0]
        nodeb_elem = self.heap[nodeb_pos][0]
        self.heap[nodea_pos], self.heap[nodeb_pos] = (
            self.heap[nodeb_pos],
            self.heap[nodea_pos],
        )
        self.position_map[nodea_elem] = nodeb_pos
        self.position_map[nodeb_elem] = nodea_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as an adjacency dict of dicts."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        """Register ``node`` if it is not already present."""
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, nodea: T, nodeb: T, weight: int) -> None:
        """Add an undirected edge between ``nodea`` and ``nodeb``."""
        self.add_node(nodea)
        self.add_node(nodeb)
        self.connections[nodea][nodeb] = weight
        self.connections[nodeb][nodea] = weight


def prims_algo(graph: GraphUndirectedWeighted[T]) -> tuple[dict[T, int], dict[T, T | None]]:
    """Run the priority-queue traversal over ``graph``.

    Returns ``(dist, parent)`` where ``parent`` records the predecessor of each
    reached node and ``dist`` the accumulated relaxation value (maxsize for
    unreached nodes).
    """
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # Initialization: seed from the first extracted node.
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # Main loop: repeatedly settle the closest remaining node.
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
19
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import datasets
import numpy as np
import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    PreTrainedTokenizer,
    TFAutoModelForSequenceClassification,
    TFTrainer,
    TFTrainingArguments,
)
from transformers.utils import logging as hf_logging


hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Load CSV split files and tokenize them into ``tf.data`` datasets.

    Returns ``(train_ds, val_ds, test_ds, label2id)``; any split whose file was
    not provided is returned as ``None``.
    """
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    # Remove the label column; what remains are the text column(s).
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        # Single-sentence classification.
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        # Sentence-pair classification.
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id


logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data the model is trained and evaluated on."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


def main():
    """Fine-tune and evaluate a TF sequence-classification model from CSV data."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} =  {value}")
                writer.write(f"{key} = {value}\n")
        results.update(result)
    return results


if __name__ == "__main__":
    main()
59
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class ImageSegmentationTool(PipelineTool):
    """Agent tool that produces a binary segmentation mask for a text label via CLIPSeg."""

    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        # PIL is required to build the output mask image.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        """Preprocess the (image, label) pair into CLIPSeg model inputs."""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        """Run CLIPSeg without tracking gradients and return the raw logits."""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Threshold the logits at 0 and convert the {0, 1} mask to an 8-bit PIL image."""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
15
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    """Tests that VisionTextDualEncoderProcessor correctly composes a tokenizer and an image processor."""

    def setUp(self):
        # Write a minimal WordPiece vocab and an image-processor config into a temp dir
        # so the processor components can be instantiated from disk.
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random PIL image of shape (30, 400, 3)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        # Reload while overriding a few kwargs; the overrides must be reflected.
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
59
0
"""simple docstring""" import pytest from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs @pytest.mark.parametrize( """kwargs, expected""" , [ ({"""num_shards""": 0, """max_num_jobs""": 1}, []), ({"""num_shards""": 1_0, """max_num_jobs""": 1}, [range(1_0 )]), ({"""num_shards""": 1_0, """max_num_jobs""": 1_0}, [range(__lowerCamelCase , i + 1 ) for i in range(1_0 )]), ({"""num_shards""": 1, """max_num_jobs""": 1_0}, [range(1 )]), ({"""num_shards""": 1_0, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 1_0 )]), ({"""num_shards""": 3, """max_num_jobs""": 1_0}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]), ] , ) def lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = _distribute_shards(**__lowerCamelCase ) assert out == expected @pytest.mark.parametrize( """gen_kwargs, max_num_jobs, expected""" , [ ({"""foo""": 0}, 1_0, [{"""foo""": 0}]), ({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]), ({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]), ({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]), ({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]), ] , ) def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Dict ) -> List[str]: '''simple docstring''' __UpperCAmelCase : str = _split_gen_kwargs(__lowerCamelCase , __lowerCamelCase ) assert out == expected @pytest.mark.parametrize( """gen_kwargs, expected""" , [ ({"""foo""": 0}, 1), ({"""shards""": [0]}, 1), ({"""shards""": [0, 1, 2, 3]}, 4), ({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4), ({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4), ({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError), ] , ) def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : 
Optional[int] ) -> List[str]: '''simple docstring''' if expected is RuntimeError: with pytest.raises(__lowerCamelCase ): _number_of_shards_in_gen_kwargs(__lowerCamelCase ) else: __UpperCAmelCase : Optional[Any] = _number_of_shards_in_gen_kwargs(__lowerCamelCase ) assert out == expected
115
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Import structure handed to _LazyModule: submodule name -> list of public names.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

# The modeling submodule is only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for type checkers only; at runtime the lazy proxy below is used.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules are imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
59
0
'''simple docstring''' import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCAmelCase ( A_ , unittest.TestCase ): __lowercase = ConsistencyModelPipeline __lowercase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS __lowercase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt __lowercase = frozenset( [ """num_inference_steps""", """generator""", """latents""", """output_type""", """return_dict""", """callback""", """callback_steps""", ] ) @property def UpperCAmelCase_ ( self :Dict )-> Union[str, Any]: A__ = UNetaDModel.from_pretrained( "diffusers/consistency-models-test" , subfolder="test_unet" , ) return unet @property def UpperCAmelCase_ ( self :Optional[Any] )-> Union[str, Any]: A__ = UNetaDModel.from_pretrained( "diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , ) return unet def UpperCAmelCase_ ( self :List[str] , lowercase_ :Union[str, Any]=False )-> List[Any]: if class_cond: A__ = self.dummy_cond_unet else: A__ = self.dummy_uncond_unet # Default to CM multistep sampler A__ = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) A__ = { "unet": unet, "scheduler": scheduler, } return components def UpperCAmelCase_ ( self :str , lowercase_ :Any , lowercase_ :Optional[Any]=0 )-> Optional[Any]: if str(snake_case__ ).startswith("mps" ): A__ = torch.manual_seed(snake_case__ ) else: A__ = torch.Generator(device=snake_case__ 
).manual_seed(snake_case__ ) A__ = { "batch_size": 1, "num_inference_steps": None, "timesteps": [22, 0], "generator": generator, "output_type": "np", } return inputs def UpperCAmelCase_ ( self :Optional[int] )-> str: A__ = "cpu" # ensure determinism for the device-dependent torch.Generator A__ = self.get_dummy_components() A__ = ConsistencyModelPipeline(**snake_case__ ) A__ = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) A__ = self.get_dummy_inputs(snake_case__ ) A__ = pipe(**snake_case__ ).images assert image.shape == (1, 32, 32, 3) A__ = image[0, -3:, -3:, -1] A__ = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCAmelCase_ ( self :int )-> Any: A__ = "cpu" # ensure determinism for the device-dependent torch.Generator A__ = self.get_dummy_components(class_cond=snake_case__ ) A__ = ConsistencyModelPipeline(**snake_case__ ) A__ = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) A__ = self.get_dummy_inputs(snake_case__ ) A__ = 0 A__ = pipe(**snake_case__ ).images assert image.shape == (1, 32, 32, 3) A__ = image[0, -3:, -3:, -1] A__ = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCAmelCase_ ( self :Tuple )-> Optional[int]: A__ = "cpu" # ensure determinism for the device-dependent torch.Generator A__ = self.get_dummy_components() A__ = ConsistencyModelPipeline(**snake_case__ ) A__ = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) A__ = self.get_dummy_inputs(snake_case__ ) A__ = 1 A__ = None A__ = pipe(**snake_case__ ).images assert image.shape == (1, 32, 32, 3) A__ = image[0, -3:, -3:, -1] A__ = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert 
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCAmelCase_ ( self :List[Any] )-> Any: A__ = "cpu" # ensure determinism for the device-dependent torch.Generator A__ = self.get_dummy_components(class_cond=snake_case__ ) A__ = ConsistencyModelPipeline(**snake_case__ ) A__ = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) A__ = self.get_dummy_inputs(snake_case__ ) A__ = 1 A__ = None A__ = 0 A__ = pipe(**snake_case__ ).images assert image.shape == (1, 32, 32, 3) A__ = image[0, -3:, -3:, -1] A__ = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): def UpperCAmelCase_ ( self :Optional[Any] )-> Tuple: super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self :Any , lowercase_ :Union[str, Any]=0 , lowercase_ :Optional[Any]=False , lowercase_ :str="cpu" , lowercase_ :Optional[int]=torch.floataa , lowercase_ :Optional[Any]=(1, 3, 64, 64) )-> Tuple: A__ = torch.manual_seed(snake_case__ ) A__ = { "num_inference_steps": None, "timesteps": [22, 0], "class_labels": 0, "generator": generator, "output_type": "np", } if get_fixed_latents: A__ = self.get_fixed_latents(seed=snake_case__ , device=snake_case__ , dtype=snake_case__ , shape=snake_case__ ) A__ = latents return inputs def UpperCAmelCase_ ( self :Any , lowercase_ :List[Any]=0 , lowercase_ :Union[str, Any]="cpu" , lowercase_ :int=torch.floataa , lowercase_ :Any=(1, 3, 64, 64) )-> int: if type(snake_case__ ) == str: A__ = torch.device(snake_case__ ) A__ = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) A__ = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ ) return latents def UpperCAmelCase_ ( self :Any )-> Union[str, Any]: A__ = UNetaDModel.from_pretrained("diffusers/consistency_models" , 
subfolder="diffusers_cd_imagenet64_l2" ) A__ = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) A__ = ConsistencyModelPipeline(unet=snake_case__ , scheduler=snake_case__ ) pipe.to(torch_device=snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) A__ = self.get_inputs() A__ = pipe(**snake_case__ ).images assert image.shape == (1, 64, 64, 3) A__ = image[0, -3:, -3:, -1] A__ = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def UpperCAmelCase_ ( self :Optional[int] )-> Union[str, Any]: A__ = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" ) A__ = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) A__ = ConsistencyModelPipeline(unet=snake_case__ , scheduler=snake_case__ ) pipe.to(torch_device=snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) A__ = self.get_inputs() A__ = 1 A__ = None A__ = pipe(**snake_case__ ).images assert image.shape == (1, 64, 64, 3) A__ = image[0, -3:, -3:, -1] A__ = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 @require_torch_a def UpperCAmelCase_ ( self :Union[str, Any] )-> Union[str, Any]: A__ = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" ) A__ = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) A__ = ConsistencyModelPipeline(unet=snake_case__ , scheduler=snake_case__ ) pipe.to(torch_device=snake_case__ , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=snake_case__ ) A__ = self.get_inputs(get_fixed_latents=snake_case__ , device=snake_case__ ) # Ensure usage of flash attention in torch 2.0 with 
sdp_kernel(enable_flash=snake_case__ , enable_math=snake_case__ , enable_mem_efficient=snake_case__ ): A__ = pipe(**snake_case__ ).images assert image.shape == (1, 64, 64, 3) A__ = image[0, -3:, -3:, -1] A__ = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @require_torch_a def UpperCAmelCase_ ( self :str )-> List[Any]: A__ = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" ) A__ = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) A__ = ConsistencyModelPipeline(unet=snake_case__ , scheduler=snake_case__ ) pipe.to(torch_device=snake_case__ , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=snake_case__ ) A__ = self.get_inputs(get_fixed_latents=snake_case__ , device=snake_case__ ) A__ = 1 A__ = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=snake_case__ , enable_math=snake_case__ , enable_mem_efficient=snake_case__ ): A__ = pipe(**snake_case__ ).images assert image.shape == (1, 64, 64, 3) A__ = image[0, -3:, -3:, -1] A__ = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
237
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase : def __init__(self : Dict , snake_case__ : Dict , snake_case__ : Any=13 , snake_case__ : Any=32 , snake_case__ : Optional[Any]=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : List[Any]=16 , snake_case__ : int=[1, 2, 1] , snake_case__ : Dict=[2, 2, 4] , snake_case__ : Dict=2 , snake_case__ : Tuple=2.0 , snake_case__ : Optional[int]=True , snake_case__ : Union[str, Any]=0.0 , snake_case__ : Any=0.0 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int="gelu" , snake_case__ : Optional[int]=False , snake_case__ : List[Any]=True , snake_case__ : List[str]=0.02 , snake_case__ : int=1e-5 , snake_case__ : List[str]=True , snake_case__ : Union[str, Any]=None , snake_case__ : List[Any]=True , snake_case__ : Optional[Any]=10 , snake_case__ : Optional[Any]=8 , snake_case__ : Any=["stage1", "stage2", "stage3"] , snake_case__ : Tuple=[1, 2, 3] , ) -> Union[str, Any]: '''simple docstring''' snake_case : Any = parent snake_case : Optional[int] = batch_size snake_case : Union[str, Any] = image_size snake_case : Dict = patch_size snake_case : Optional[Any] = num_channels snake_case : Union[str, Any] = embed_dim snake_case : int = depths snake_case : List[str] = num_heads snake_case : Union[str, Any] = window_size snake_case : Union[str, Any] = mlp_ratio snake_case : 
List[Any] = qkv_bias snake_case : List[Any] = hidden_dropout_prob snake_case : Union[str, Any] = attention_probs_dropout_prob snake_case : Union[str, Any] = drop_path_rate snake_case : int = hidden_act snake_case : Optional[int] = use_absolute_embeddings snake_case : int = patch_norm snake_case : Union[str, Any] = layer_norm_eps snake_case : Any = initializer_range snake_case : Optional[Any] = is_training snake_case : Tuple = scope snake_case : Optional[int] = use_labels snake_case : Optional[Any] = type_sequence_label_size snake_case : Union[str, Any] = encoder_stride snake_case : Any = out_features snake_case : Tuple = out_indices def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict: '''simple docstring''' snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case : int = None if self.use_labels: snake_case : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case : Dict = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE (self : List[str] ) -> int: '''simple docstring''' return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Tuple ) -> Optional[Any]: '''simple docstring''' 
snake_case : Union[str, Any] = MaskFormerSwinModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() snake_case : List[Any] = model(snake_case__ ) snake_case : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case : int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] ) -> str: '''simple docstring''' snake_case : Optional[int] = MaskFormerSwinBackbone(config=snake_case__ ) model.to(snake_case__ ) model.eval() snake_case : List[Any] = model(snake_case__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(snake_case__ ): snake_case : Tuple = ["stem"] snake_case : List[Any] = MaskFormerSwinBackbone(config=snake_case__ ) def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]: '''simple docstring''' snake_case : Union[str, Any] = self.prepare_config_and_inputs() snake_case , snake_case , snake_case : List[Any] = config_and_inputs snake_case : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase ( A_ ,A_ ,unittest.TestCase ): A__ : List[str] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) A__ : str = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} A__ : Optional[Any] = False A__ : List[Any] = False A__ : List[str] = False A__ : List[str] = False A__ : Union[str, Any] = False def _SCREAMING_SNAKE_CASE (self : 
Optional[int] ) -> List[str]: '''simple docstring''' snake_case : str = MaskFormerSwinModelTester(self ) snake_case : Optional[int] = ConfigTester(self , config_class=snake_case__ , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" ) ) def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[Any]: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE (self : Tuple ) -> List[Any]: '''simple docstring''' return def _SCREAMING_SNAKE_CASE (self : Dict ) -> str: '''simple docstring''' snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _SCREAMING_SNAKE_CASE (self : int ) -> Dict: '''simple docstring''' snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*snake_case__ ) @unittest.skip("Swin does not use inputs_embeds" ) def _SCREAMING_SNAKE_CASE (self : int ) -> Any: '''simple docstring''' pass @unittest.skip("Swin does not support feedforward chunking" ) def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Dict: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[str]: '''simple docstring''' snake_case , snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case : int = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() 
, (nn.Module) ) snake_case : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict: '''simple docstring''' snake_case , snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case : str = model_class(snake_case__ ) snake_case : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case : Optional[Any] = [*signature.parameters.keys()] snake_case : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" ) def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> str: '''simple docstring''' pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" ) def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Any: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Tuple ) -> Optional[int]: '''simple docstring''' snake_case : Tuple = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): snake_case : Any = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) snake_case : int = outputs.hidden_states snake_case : Union[str, Any] = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(snake_case__ ) , snake_case__ ) # Swin has a different seq_length snake_case : Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, 
self.model_tester.embed_dim] , ) def _SCREAMING_SNAKE_CASE (self : Dict ) -> Union[str, Any]: '''simple docstring''' snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case : int = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case : int = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case : Dict = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE (self : int ) -> Any: '''simple docstring''' snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case : Any = 3 snake_case : List[str] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case : str = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case : Optional[Any] = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , (padded_height, padded_width) ) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" ) def _SCREAMING_SNAKE_CASE 
(self : Optional[int] ) -> str: '''simple docstring''' pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def _SCREAMING_SNAKE_CASE (self : str ) -> int: '''simple docstring''' pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def _SCREAMING_SNAKE_CASE (self : int ) -> str: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : Any ) -> Any: '''simple docstring''' snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(snake_case__ : Union[str, Any] ): snake_case : Any = 0 return t def check_equivalence(snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[int]={} ): with torch.no_grad(): snake_case : Optional[Any] = model(**snake_case__ , return_dict=snake_case__ , **snake_case__ ) snake_case : Tuple = model(**snake_case__ , return_dict=snake_case__ , **snake_case__ ).to_tuple() def recursive_check(snake_case__ : List[str] , snake_case__ : Optional[Any] ): if isinstance(snake_case__ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(snake_case__ , snake_case__ ): recursive_check(snake_case__ , snake_case__ ) elif isinstance(snake_case__ , snake_case__ ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(snake_case__ , snake_case__ ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(snake_case__ ) , set_nan_tensor_to_zero(snake_case__ ) , atol=1e-5 ) , msg=( "Tuple and dict output are not equal. Difference:" f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" f""" {torch.isnan(snake_case__ ).any()} and `inf`: {torch.isinf(snake_case__ )}. 
Dict has""" f""" `nan`: {torch.isnan(snake_case__ ).any()} and `inf`: {torch.isinf(snake_case__ )}.""" ) , ) recursive_check(snake_case__ , snake_case__ ) for model_class in self.all_model_classes: snake_case : Optional[int] = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() snake_case : Union[str, Any] = self._prepare_for_class(snake_case__ , snake_case__ ) snake_case : Tuple = self._prepare_for_class(snake_case__ , snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ ) snake_case : Tuple = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) snake_case : Optional[Any] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ ) snake_case : Dict = self._prepare_for_class(snake_case__ , snake_case__ ) snake_case : List[Any] = self._prepare_for_class(snake_case__ , snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ , {"output_hidden_states": True} ) snake_case : Any = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) snake_case : List[str] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ , {"output_hidden_states": True} ) @require_torch class UpperCAmelCase ( unittest.TestCase ,A_ ): A__ : int = (MaskFormerSwinBackbone,) if is_torch_available() else () A__ : int = MaskFormerSwinConfig def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Any: '''simple docstring''' snake_case : Union[str, Any] = MaskFormerSwinModelTester(self ) def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[Any]: '''simple docstring''' snake_case , snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() snake_case : Optional[int] = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: snake_case : Optional[int] = 
backbone_class(snake_case__ ) backbone.to(snake_case__ ) backbone.eval() snake_case : Union[str, Any] = backbone(**snake_case__ ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , snake_case__ ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True snake_case : Optional[int] = backbone(**snake_case__ , output_hidden_states=snake_case__ ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) snake_case , snake_case , snake_case : Dict = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: snake_case : Optional[Any] = backbone(**snake_case__ , output_attentions=snake_case__ ) self.assertIsNotNone(outputs.attentions )
59
0
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    """Configuration class storing the hyper-parameters of an EfficientNet model.

    Instantiating it with the defaults yields a configuration in the style of the
    ``google/efficientnet-b7`` checkpoint. List-typed arguments describe the
    per-stage layout (one entry per block group).
    """

    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        # NOTE: list defaults are shared across calls; acceptable here because the
        # config only reads them, never mutates them in place.
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # Each block group expands to 4 hidden layers per repeat.
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    """ONNX export configuration for EfficientNet."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single image input with dynamic batch/spatial axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported graph.
        return 1e-5
43
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def make_atomaa_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct the dense atom14 <-> sparse atom37 index maps and masks.

    Review fixes: the original block's parameter was mangled away (the body read
    an undefined ``protein``), the two distinct lookup tables (14->37 and 37->14)
    were appended into a single list, the nonexistent dtypes ``torch.intaa`` /
    ``torch.floataa`` were used, and the result dict writes were reduced to
    throwaway locals. Output keys restored to the conventional names
    ("atom14_atom_exists", "residx_atom14_to_atom37", "residx_atom37_to_atom14",
    "atom37_atom_exists") — verify against downstream consumers.
    """
    restype_atomaa_to_atomaa = []  # per restype: atom14 index -> atom37 index
    restype_atomaa_to_atomaa_rev = []  # per restype: atom37 index -> atom14 index
    restype_atomaa_mask = []  # per restype: which of the 14 slots hold a real atom

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atomaa_to_atomaa.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idxaa = {name: i for i, name in enumerate(atom_names)}
        restype_atomaa_to_atomaa_rev.append(
            [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types]
        )
        restype_atomaa_mask.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atomaa_to_atomaa.append([0] * 14)
    restype_atomaa_to_atomaa_rev.append([0] * 37)
    restype_atomaa_mask.append([0.0] * 14)

    device = protein["aatype"].device
    restype_atomaa_to_atomaa = torch.tensor(restype_atomaa_to_atomaa, dtype=torch.int32, device=device)
    restype_atomaa_to_atomaa_rev = torch.tensor(restype_atomaa_to_atomaa_rev, dtype=torch.int32, device=device)
    restype_atomaa_mask = torch.tensor(restype_atomaa_mask, dtype=torch.float32, device=device)
    protein_aatype = protein["aatype"].to(torch.long)

    # (num_res, 14): atom37 index for each dense atom slot of this protein.
    residx_atomaa_to_atomaa = restype_atomaa_to_atomaa[protein_aatype]
    residx_atomaa_mask = restype_atomaa_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atomaa_mask
    protein["residx_atom14_to_atom37"] = residx_atomaa_to_atomaa.long()

    # Gather indices for mapping back from atom37 to atom14.
    residx_atomaa_to_atomaa_rev = restype_atomaa_to_atomaa_rev[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atomaa_to_atomaa_rev.long()

    # Per-residue atom37 existence mask.
    restype_atomaa_mask_sparse = torch.zeros([21, 37], dtype=torch.float32, device=device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_atoa[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atomaa_mask_sparse[restype, atom_type] = 1

    protein["atom37_atom_exists"] = restype_atomaa_mask_sparse[protein_aatype]

    return protein


def make_atomaa_masks_np(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
    """NumPy wrapper: convert arrays to tensors, build the masks, convert back."""
    torch_batch = tree_map(
        lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray
    )
    out = tensor_tree_map(lambda t: np.array(t), make_atomaa_masks(torch_batch))
    return out
59
0
"""simple docstring""" from __future__ import annotations _lowercase : Optional[Any] = 8.988E9 # units = N * m^s * C^-2 def snake_case__ ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float ): """simple docstring""" lowerCamelCase__ : Tuple =abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if distance < 0: raise ValueError('''Distance cannot be negative''' ) if force == 0: lowerCamelCase__ : Any =COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: lowerCamelCase__ : Tuple =abs(__lowerCamelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: lowerCamelCase__ : List[str] =abs(__lowerCamelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: lowerCamelCase__ : Tuple =(COULOMBS_CONSTANT * charge_product / abs(__lowerCamelCase )) ** 0.5 return {"distance": distance} raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
238
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


# NOTE(review): the original block bound all four module constants to one
# mangled name, so the class attributes below referenced undefined names;
# restored to the conventional distinct constants.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) LXMERT tokenizer, BERT-style WordPiece.

    Review fixes: the original class defined three methods under one mangled
    name, so only the last survived and the tokenizer-API overrides
    (``build_inputs_with_special_tokens`` etc.) were lost; the normalizer state
    computed in ``__init__`` was also never written back to the backend.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-build the backend normalizer only if its persisted options differ
        # from the ones requested here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """[CLS] A [SEP] (B [SEP]) — add special tokens around one or two sequences."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_a:
            output += token_ids_a_a + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Persist the backend tokenizer model files to ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
59
0
"""Convert Conditional DETR checkpoints from the original repository to HF format.

Review fixes: all five helper functions were defined under one mangled name
(``UpperCAmelCase_``), so earlier definitions were shadowed and the ``__main__``
block called an undefined ``convert_conditional_detr_checkpoint``; every
state-dict write inside the helpers had also been reduced to a throwaway local.
"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    ConditionalDetrConfig,
    ConditionalDetrForObjectDetection,
    ConditionalDetrForSegmentation,
    ConditionalDetrImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
    )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)


def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Return a new OrderedDict with backbone keys moved under conv_encoder."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each encoder layer's fused in_proj weight/bias into q/k/v entries."""
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    """Download the standard COCO cats test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model weights to our Conditional DETR structure."""
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # NOTE(review): drops the original "conditional_detr" prefix and
                # re-roots the key under the panoptic base model — confirm slice
                # against the upstream conversion script.
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
225
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    """Unit tests for DDIMParallelScheduler.

    Review fixes: every method in the original block was defined under one
    mangled name (``_SCREAMING_SNAKE_CASE``), so only the final definition
    survived and the helpers referenced as ``self.get_scheduler_config`` /
    ``self.full_loop`` did not exist; several boolean arguments had also been
    replaced by the undefined name ``snake_case__`` (restored to True/False
    per the upstream test).
    """

    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        # Baseline scheduler configuration; overridable per-test via kwargs.
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        # Run a complete 10-step denoising loop and return the final sample.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(
            residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta
        )

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
59
0
"""WikiSplit metric: combines SARI, sacreBLEU and exact-match scores.

Restored from a name-mangled dump: helper functions were all named ``a_`` (each
shadowing the previous), bodies referenced the undefined name
``__lowerCamelCase``, and distinct accumulators had been collapsed onto a
single variable. Names and structure below follow the visible token stream.
"""
import re
import string
from collections import Counter

import sacrebleu
import sacremoses
from packaging import version

import datasets


_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
"""

_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""

_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
    sources: list of source sentences where each sentence should be a string.
    predictions: list of predicted sentences where each sentence should be a string.
    references: list of lists of reference sentences where each sentence should be a string.
Returns:
    sari: sari score
    sacrebleu: sacrebleu score
    exact: exact score

Examples:
    >>> sources=["About 95 species are currently accepted ."]
    >>> predictions=["About 95 you now get in ."]
    >>> references=[["About 95 species are currently known ."]]
    >>> wiki_split = datasets.load_metric("wiki_split")
    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
    >>> print(results)
    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""


def normalize_answer(text):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))


def compute_exact(a_gold, a_pred):
    """Return 1 if the two strings match after normalization, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    """Percentage of predictions that exactly match at least one reference."""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100


def SARIngram(sgrams, cgrams, rgramslist, numref):
    """Compute the (keep, delete, add) n-gram scores for one n-gram order.

    sgrams/cgrams: n-grams of the source / candidate sentence.
    rgramslist: list of n-gram lists, one per reference; numref = len(rgramslist).
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        # Replicate source counts so they are comparable with the pooled references.
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    """SARI score of one candidate sentence against source + references.

    Averages the keep/delete/add components over 1- to 4-grams.
    """
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []

    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """Tokenize/normalize a sentence so it can be split on spaces.

    Normalization is requried for the ASSET dataset (one of the primary
    datasets in sentence simplification) to allow using space
    to split the sentence. Even though Wiki-Auto and TURK datasets,
    do not require normalization, we do it for consistency.
    Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    """
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        # sacrebleu >= 2 moved the tokenizer registry; handle both layouts.
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    """Corpus-level SARI (0-100), averaged over sentences."""
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """Corpus BLEU via sacrebleu; requires the same number of references per prediction."""
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    # sacrebleu wants references transposed: one list per reference position.
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    """datasets.Metric wrapper returning {'sari', 'sacrebleu', 'exact'}."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
98
def encrypt(input_string: str, key: int) -> str:
    """Encrypt ``input_string`` with the rail-fence (zigzag) transposition cipher.

    ``key`` is the number of rails (rows of the zigzag grid).

    Raises:
        ValueError: if ``key`` is zero or negative.
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        # With one rail (or a string shorter than the grid) nothing is shuffled.
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Invert :func:`encrypt`: rebuild the zigzag grid, then read it in zigzag order.

    Raises:
        ValueError: if ``key`` is zero or negative.
    """
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    # First pass: mark how many characters land on each rail.
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    # Second pass: slice the ciphertext into the rails.
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    # Third pass: read the rails back in zigzag order.
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Return ``{key: decrypt(input_string, key)}`` for every key from 1 to len-1."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
59
0
"""Tests for BertGeneration models.

Restored from a name-mangled dump: method signatures had duplicate parameter
names (a SyntaxError), and distinct locals/classes had been collapsed onto
shared obfuscated names. Names below follow the standard transformers
model-tester conventions that the mixins in this file rely on.
"""
import unittest

from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import BertGenerationDecoder, BertGenerationEncoder


class BertGenerationEncoderTester:
    """Builds small configs/inputs and runs shape checks for BertGeneration models."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        # Run both with and without an attention mask to exercise both code paths.
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        # Also check the path where no encoder attention mask is given.
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        # The same weights should load under the "bert" model type as well.
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
91
# MT5 lazy-loading package init.
# Restored from a name-mangled dump: the tokenizer aliases and `_import_structure`
# were all bound to the same clobbered name, `extra_objects` referenced undefined
# names, and the `_LazyModule` was never installed into `sys.modules`.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# MT5 reuses the T5 tokenizers; fall back to dummy objects when the
# sentencepiece / tokenizers backends are not installed.
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
59
0
# Reformer lazy-loading package init.
# Restored from a name-mangled dump: each `_import_structure[...]` update had
# been collapsed into a rebinding of one obfuscated name, leaving
# `_import_structure` undefined at the final `_LazyModule(...)` call.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
86
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


logger = logging.get_logger(__name__)

# Map from ONNX tensor element-type strings to NumPy dtypes.
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    """Thin wrapper around an ``onnxruntime.InferenceSession`` with
    ``save_pretrained``/``from_pretrained`` helpers in the Hugging Face style."""

    def __init__(self, model=None, **kwargs):
        """Wrap *model* (an ``ort.InferenceSession``). ``model_save_dir`` and
        ``latest_model_name`` may be supplied via kwargs for later re-saving."""
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        """Run inference: each keyword argument becomes a named model input."""
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        # First argument (output names) is None -> return all model outputs.
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Create an ``ort.InferenceSession`` for the model at *path*.

        Falls back to ``CPUExecutionProvider`` when no provider is given.
        """
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        """Copy the latest model file (and external weights, if any) from
        ``self.model_save_dir`` into *save_directory*."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        """Save the model files into *save_directory* (created if missing)."""
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        """Load a model either from a local directory or from the Hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        """Public entry point; supports the ``repo@revision`` id syntax."""
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
59
0
"""Conditional imports for the UnCLIP pipelines.

The real pipelines require both PyTorch and transformers>=4.25.0; otherwise the
dummy placeholder objects (which raise a helpful error on use) are exposed.
"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dependencies missing/too old: expose dummies with the same names.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
1
# RegNet checkpoint conversion script (classy_vision / VISSL / timm -> HF Transformers).
#
# NOTE(review): this file carries heavy mechanical-rename damage and does not run
# as-is: many defs reuse the same name (`UpperCAmelCase`, `UpperCamelCase`,
# `_SCREAMING_SNAKE_CASE`) so later definitions clobber earlier ones; several
# signatures declare duplicate parameter names (a SyntaxError); bodies reference
# names (`m`, `x`, `val`, `keys`, `ImageNetPreTrainedConfig`, ...) that the
# obfuscated signatures no longer bind; identifiers had digits collapsed
# (`nn.Convad` ~ Conv2d, `RegNetYaagf` ~ RegNetY32gf — note the duplicate
# import below). Comments mark intent; restore against upstream before use.
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple

import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs

from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging


logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger()


@dataclass
class UpperCAmelCase:
    # Intended fields (clobbered by the rename; `A_` is undefined — TODO confirm):
    # module: nn.Module, traced: List[nn.Module], handles: list
    A__ : nn.Module
    A__ : List[nn.Module] = field(default_factory=A_ )
    A__ : list = field(default_factory=A_ )

    def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Tensor , snake_case__ : Tensor ) -> Optional[Any]:
        """Forward hook: record leaf modules (no submodules, or conv/batchnorm)."""
        # NOTE(review): `m` / `has_not_submodules` are unbound under these
        # parameter names; `nn.Convad`/`nn.BatchNormad` look like Conv2d/BatchNorm2d.
        snake_case : List[str] = len(list(m.modules() ) ) == 1 or isinstance(snake_case__ , nn.Convad ) or isinstance(snake_case__ , nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(snake_case__ )

    def __call__(self : List[Any] , snake_case__ : Tensor ) -> List[Any]:
        """Register hooks on every submodule, run one forward pass, then remove hooks."""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(snake_case__ )
        [x.remove() for x in self.handles]
        return self

    @property
    def _SCREAMING_SNAKE_CASE (self : int ) -> Optional[int]:
        """Traced modules that actually own parameters (non-empty state_dict)."""
        return list(filter(lambda snake_case__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )


@dataclass
class UpperCAmelCase:
    # Intended fields: src, dest (nn.Module), verbose, src_skip, dest_skip,
    # raise_if_mismatch (per the attribute reads in __call__).
    A__ : nn.Module
    A__ : nn.Module
    A__ : int = 1
    A__ : List = field(default_factory=A_ )
    A__ : List = field(default_factory=A_ )
    A__ : bool = True

    def __call__(self : List[Any] , snake_case__ : Tensor ) -> Any:
        """Trace src and dest with the same input and copy state_dicts pairwise."""
        snake_case : str = Tracker(self.dest )(snake_case__ ).parametrized
        snake_case : Optional[int] = Tracker(self.src )(snake_case__ ).parametrized
        snake_case : List[str] = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.src_skip , snake_case__ ) )
        snake_case : Optional[Any] = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.dest_skip , snake_case__ ) )
        if len(snake_case__ ) != len(snake_case__ ) and self.raise_if_mismatch:
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(snake_case__ )} operations while"""
                f""" destination module has {len(snake_case__ )}."""
            )
        for dest_m, src_m in zip(snake_case__ , snake_case__ ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f"""Transfered from={src_m} to={dest_m}""" )


class UpperCAmelCase(nn.Module):
    # Wrapper presenting a classy_vision RegNet trunk through the VISSL
    # feature-extraction interface (stem + res1..resN blocks).
    def __init__(self : Tuple , snake_case__ : nn.Module ) -> Optional[Any]:
        super().__init__()
        snake_case : List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem) )
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block" ), f"""Unexpected layer name {k}"""
            snake_case : Union[str, Any] = len(snake_case__ ) + 1
            feature_blocks.append((f"""res{block_index}""", v) )
        snake_case : Optional[Any] = nn.ModuleDict(snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Tensor ) -> Dict:
        """Forward: delegate to VISSL's trunk feature extraction helper."""
        return get_trunk_forward_outputs(
            snake_case__ , out_feat_keys=snake_case__ , feature_blocks=self._feature_blocks , )


class UpperCAmelCase(A_):
    # dict subclass: maps HF model names to timm factory callables; unknown
    # keys are translated into timm naming and loaded pretrained on the fly.
    def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : str ) -> str:
        """Translate e.g. 'regnet-y-032' -> 'regnety_032' style timm names."""
        snake_case : List[Any] = x.split("-" )
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )

    def __getitem__(self : Optional[int] , snake_case__ : str ) -> Callable[[], Tuple[nn.Module, Dict]]:
        if x not in self:
            snake_case : Dict = self.convert_name_to_timm(snake_case__ )
            snake_case : Union[str, Any] = partial(lambda: (timm.create_model(snake_case__ , pretrained=snake_case__ ).eval(), None) )
        else:
            snake_case : List[str] = super().__getitem__(snake_case__ )
        return val


class UpperCAmelCase(A_):
    # dict subclass: SEER (non-in1k) checkpoints map to the headless RegNetModel,
    # everything else to RegNetForImageClassification.
    def __getitem__(self : Dict , snake_case__ : str ) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            snake_case : str = RegNetModel
        else:
            snake_case : Optional[Any] = RegNetForImageClassification
        return val


def UpperCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Tuple[str, str]] ):
    """Copy selected (from_key -> to_key) tensors from a VISSL head state_dict."""
    for from_key, to_key in keys:
        snake_case : str = from_state_dict[from_key].clone()
        print(f"""Copied key={from_key} to={to_key}""" )
    return to_state_dict


def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : Callable[[], nn.Module] , __lowerCamelCase : Callable[[], nn.Module] , __lowerCamelCase : RegNetConfig , __lowerCamelCase : Path , __lowerCamelCase : bool = True , ):
    """Convert one checkpoint: transfer weights, check outputs match, optionally push."""
    print(f"""Converting {name}...""" )
    with torch.no_grad():
        snake_case , snake_case : int = from_model_func()
        snake_case : str = our_model_func(__lowerCamelCase ).eval()
        snake_case : int = ModuleTransfer(src=__lowerCamelCase , dest=__lowerCamelCase , raise_if_mismatch=__lowerCamelCase )
        snake_case : Dict = torch.randn((1, 3, 224, 224) )
        module_transfer(__lowerCamelCase )
    if from_state_dict is not None:
        snake_case : str = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            snake_case : Tuple = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        snake_case : Optional[Any] = manually_copy_vissl_head(__lowerCamelCase , our_model.state_dict() , __lowerCamelCase )
        our_model.load_state_dict(__lowerCamelCase )
    snake_case : Any = our_model(__lowerCamelCase , output_hidden_states=__lowerCamelCase )
    snake_case : Union[str, Any] = (
        our_outputs.logits if isinstance(__lowerCamelCase , __lowerCamelCase ) else our_outputs.last_hidden_state
    )
    snake_case : Union[str, Any] = from_model(__lowerCamelCase )
    snake_case : Dict = from_output[-1] if type(__lowerCamelCase ) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        snake_case : Any = our_outputs.hidden_states[-1]
    assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=__lowerCamelCase , )
        snake_case : List[str] = 224 if "seer" not in name else 384
        # we can use the convnext one
        snake_case : int = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=__lowerCamelCase )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=__lowerCamelCase , )
        print(f"""Pushed {name}""" )


def UpperCamelCase ( __lowerCamelCase : Path , __lowerCamelCase : str = None , __lowerCamelCase : bool = True ):
    """Build all configs, wire up checkpoint loaders, convert one or all models."""
    snake_case : Union[str, Any] = "imagenet-1k-id2label.json"
    snake_case : List[str] = 1000
    snake_case : List[str] = (1, num_labels)
    snake_case : Any = "huggingface/label-files"
    snake_case : List[str] = num_labels
    # Download and parse the ImageNet-1k id->label mapping.
    snake_case : Optional[Any] = json.load(open(cached_download(hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type="dataset" ) ) , "r" ) )
    snake_case : List[Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
    snake_case : str = idalabel
    snake_case : List[Any] = {v: k for k, v in idalabel.items()}
    # Partial config factory pre-filled with the label maps
    # (NOTE(review): used below as `ImageNetPreTrainedConfig`, which this
    # rename artifact no longer defines).
    snake_case : Dict = partial(__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase )
    # name -> RegNetConfig for every supported architecture.
    snake_case : Optional[Any] = {
        "regnet-x-002": ImageNetPreTrainedConfig(
            depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
        "regnet-x-004": ImageNetPreTrainedConfig(
            depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
        "regnet-x-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
        "regnet-x-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
        "regnet-x-016": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
        "regnet-x-032": ImageNetPreTrainedConfig(
            depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
        "regnet-x-040": ImageNetPreTrainedConfig(
            depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
        "regnet-x-064": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
        "regnet-x-080": ImageNetPreTrainedConfig(
            depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
        "regnet-x-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
        "regnet-x-160": ImageNetPreTrainedConfig(
            depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
        "regnet-x-320": ImageNetPreTrainedConfig(
            depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
        # y variant
        "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
        "regnet-y-004": ImageNetPreTrainedConfig(
            depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
        "regnet-y-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
        "regnet-y-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
        "regnet-y-016": ImageNetPreTrainedConfig(
            depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
        "regnet-y-032": ImageNetPreTrainedConfig(
            depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
        "regnet-y-040": ImageNetPreTrainedConfig(
            depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
        "regnet-y-064": ImageNetPreTrainedConfig(
            depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
        "regnet-y-080": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
        "regnet-y-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
        "regnet-y-160": ImageNetPreTrainedConfig(
            depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
        "regnet-y-320": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
        # models created by SEER -> https://arxiv.org/abs/2202.08360
        "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
        "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
        "regnet-y-1280-seer": RegNetConfig(
            depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
        "regnet-y-2560-seer": RegNetConfig(
            depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
        "regnet-y-10b-seer": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
        # finetuned on imagenet
        "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
        "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
        "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
        "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
            depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
        "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
    }
    snake_case : Union[str, Any] = NameToOurModelFuncMap()
    snake_case : str = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(__lowerCamelCase : str , __lowerCamelCase : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
        """Download a classy_vision checkpoint, load trunk weights, return (model, heads)."""
        snake_case : List[Any] = torch.hub.load_state_dict_from_url(__lowerCamelCase , model_dir=str(__lowerCamelCase ) , map_location="cpu" )
        snake_case : Dict = model_func()
        # check if we have a head, if yes add it
        snake_case : str = files["classy_state_dict"]["base_model"]["model"]
        snake_case : Dict = model_state_dict["trunk"]
        model.load_state_dict(__lowerCamelCase )
        return model.eval(), model_state_dict["heads"]

    # pretrained
    snake_case : List[Any] = partial(
        __lowerCamelCase ,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" ,
        lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    snake_case : Optional[int] = partial(
        __lowerCamelCase ,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" ,
        lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    snake_case : List[str] = partial(
        __lowerCamelCase ,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" ,
        lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
    snake_case : Tuple = partial(
        __lowerCamelCase ,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" ,
        lambda: FakeRegNetVisslWrapper(
            # NOTE(review): `w_a` is passed twice (SyntaxError); second likely w_0.
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
    # IN1K finetuned
    snake_case : List[Any] = partial(
        __lowerCamelCase ,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" ,
        lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    snake_case : Tuple = partial(
        __lowerCamelCase ,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" ,
        lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    snake_case : str = partial(
        __lowerCamelCase ,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" ,
        lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
    snake_case : Dict = partial(
        __lowerCamelCase ,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" ,
        lambda: FakeRegNetVisslWrapper(
            # NOTE(review): same duplicate `w_a` artifact as above.
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
    if model_name:
        convert_weight_and_push(
            __lowerCamelCase ,
            names_to_from_model_map[model_name] ,
            names_to_ours_model_map[model_name] ,
            names_to_config[model_name] ,
            __lowerCamelCase ,
            __lowerCamelCase , )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                __lowerCamelCase ,
                names_to_from_model_map[model_name] ,
                names_to_ours_model_map[model_name] ,
                __lowerCamelCase ,
                __lowerCamelCase ,
                __lowerCamelCase , )
    return config, expected_shape


if __name__ == "__main__":
    __lowerCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default=None,
        type=str,
        help=(
            """The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
            """ currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=Path,
        required=True,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument(
        """--push_to_hub""",
        default=True,
        type=bool,
        required=False,
        help="""If True, push model and image processor to the hub.""",
    )
    __lowerCamelCase = parser.parse_args()
    __lowerCamelCase = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
59
0
"""Lazy import structure for the BioGPT model (Hugging Face Transformers style)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Map of submodule name -> public names it exports; extended below when torch
# is available.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch modeling classes.
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # Replace this module object with the lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
19
import warnings
from typing import Dict

import numpy as np

from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline


if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


def sigmoid(_outputs):
    """Element-wise logistic sigmoid."""
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    """Numerically stable softmax over the last axis (subtract the row max first)."""
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    # Post-processing functions applied to the raw logits.
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n ",
)
class TextClassificationPipeline(Pipeline):
    """Sequence-classification pipeline: tokenizes text, runs the model, and
    maps logits to (label, score) dicts."""

    # Class-level defaults; may be overridden via model config / call kwargs.
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        """Split user kwargs into preprocess / forward / postprocess dicts.

        ``top_k=""`` is a sentinel meaning "not passed", which keeps the legacy
        ``return_all_scores`` behavior alive.
        """
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        """Classify the given text(s); see the class docstring for options."""
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        """Tokenize a string, a {"text", "text_pair"} dict, or the legacy
        single-pair list-of-list form."""
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair."
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        """Turn logits into (label, score) dicts, picking sigmoid/softmax from
        the model config when the caller didn't specify a function."""
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
59
0
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    """Return every subset of *nums* (elements kept in index order) whose sum
    equals *max_sum*.

    Delegates to a backtracking search over the subset state-space tree.
    """
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    # Total of all remaining numbers; used for pruning unreachable branches.
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """DFS over subsets starting at *num_index*, appending to *result* each
    *path* whose sum reaches *max_sum*.

    Prunes a branch when the partial sum overshoots, or when even taking every
    remaining number could no longer reach *max_sum*.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],  # fresh list, so appended paths never alias
            result,
            remaining_nums_sum - nums[index],
        )


if __name__ == "__main__":
    nums = [3, 34, 4, 12, 5, 2]
    max_sum = 9
    result = generate_sum_of_subsets_soln(nums, max_sum)
    print(*result)
15
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True when digit ``n`` can be placed at (row, column) without clashing with its row, column, or 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the (row, column) of the first empty cell (value 0), or None when the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """
    Solve ``grid`` in place by backtracking; return the solved grid, or None when
    no assignment of digits satisfies the constraints.
    """
    if location := find_empty_location(grid):
        # BUG FIX: the original used an annotated tuple-unpacking assignment,
        # which is a syntax error; plain unpacking restores it.
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # undo the tentative placement before trying the next digit
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid row by row, cells separated by spaces."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
59
0
"""Bottom-up (iterative) merge sort."""
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """
    Merge the two sorted runs ``input_list[low:mid]`` and ``input_list[mid:high + 1]``
    back into ``input_list`` in place, and return the list.
    """
    result = []
    # BUG FIX: the original never bound `left`/`right` and dropped the slice
    # write-back, leaving the list unsorted and raising NameError.
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """
    Sort ``input_list`` (any iterable of comparable items) using iterative merge sort
    and return the sorted list.
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
115
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="""%(message)s""") def UpperCamelCase ( __lowerCamelCase : np.ndarray ): return input_array.reshape((input_array.size, 1) ) def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : int ): snake_case : Any = np.nan for i in range(__lowerCamelCase ): snake_case : List[str] = features[:, labels == i] snake_case : Dict = data.mean(1 ) # Centralize the data of class i snake_case : Optional[Any] = data - column_reshape(__lowerCamelCase ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(__lowerCamelCase , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) snake_case : Optional[Any] = np.dot(__lowerCamelCase , centered_data.T ) return covariance_sum / features.shape[1] def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : int ): snake_case : Optional[Any] = features.mean(1 ) snake_case : Tuple = np.nan for i in range(__lowerCamelCase ): snake_case : Tuple = features[:, labels == i] snake_case : Tuple = data.shape[1] snake_case : List[str] = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase ) , (column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase )).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) snake_case : Optional[int] = device_data * np.dot( column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase ) , (column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase )).T , ) return covariance_sum / features.shape[1] def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : int ): # Check if the features have been loaded if features.any(): snake_case : Tuple = features.mean(1 ) # Center the dataset snake_case : List[str] = features - np.reshape(__lowerCamelCase , (data_mean.size, 1) ) snake_case : Optional[Any] = np.dot(__lowerCamelCase , centered_data.T ) / features.shape[1] snake_case , snake_case : Dict = np.linalg.eigh(__lowerCamelCase ) # Take all the columns in the reverse order (-1), and then takes only the first snake_case : Optional[Any] = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space snake_case : Union[str, Any] = np.dot(filtered_eigenvectors.T , __lowerCamelCase ) logging.info("Principal Component Analysis computed" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=__lowerCamelCase ) logging.error("Dataset empty" ) raise AssertionError def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : int , __lowerCamelCase : int ): assert classes > dimensions # Check if features have been already loaded if features.any: snake_case , snake_case : str = eigh( covariance_between_classes(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , covariance_within_classes(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , ) snake_case : str = eigenvectors[:, ::-1][:, :dimensions] snake_case , snake_case , snake_case : int = np.linalg.svd(__lowerCamelCase ) snake_case : List[Any] = svd_matrix[:, 0:dimensions] snake_case : Optional[Any] = np.dot(filtered_svd_matrix.T , __lowerCamelCase ) logging.info("Linear Discriminant Analysis computed" ) return projected_data else: 
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=__lowerCamelCase ) logging.error("Dataset empty" ) raise AssertionError def UpperCamelCase ( ): # Create dummy dataset with 2 classes and 3 features snake_case : str = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) snake_case : Union[str, Any] = np.array([0, 0, 0, 1, 1] ) snake_case : List[Any] = 2 snake_case : Any = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(__lowerCamelCase ) as error_info: snake_case : str = linear_discriminant_analysis( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if isinstance(__lowerCamelCase , np.ndarray ): raise AssertionError( "Did not raise AssertionError for dimensions > classes" ) assert error_info.type is AssertionError def UpperCamelCase ( ): snake_case : List[str] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) snake_case : List[str] = 2 snake_case : int = np.array([[6.9282_0323, 8.6602_5404, 10.3923_0485], [3.0, 3.0, 3.0]] ) with pytest.raises(__lowerCamelCase ) as error_info: snake_case : Union[str, Any] = principal_component_analysis(__lowerCamelCase , __lowerCamelCase ) if not np.allclose(__lowerCamelCase , __lowerCamelCase ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
59
0
"""A small convolution neural network (one conv layer, one pooling layer, two BP layers) built from scratch with NumPy."""
import pickle

import numpy as np


class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [kernel_size, kernel_count, conv_step] for the conv layer
        :param size_p1: pooling window size of the first pooling layer
        :param bp_num1: units of the flattened BP input layer
        :param bp_num2: units of the BP hidden layer
        :param bp_num3: units of the BP output layer
        :param rate_w: learning rate for weights
        :param rate_t: learning rate for thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        # Weights are initialized uniformly in (-0.5, 0.5); thresholds in (-1, 1).
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        """Pickle all hyper-parameters, weights and thresholds to ``save_path``."""
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        """Rebuild a CNN instance from a file produced by ``save_model``."""
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        """Sigmoid activation."""
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        """Round a prediction to 3 decimal places."""
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        """
        Convolve ``data`` with every kernel in ``w_convs``.

        Returns ``(focus_list, data_featuremap)``: the flattened image patches and
        the list of sigmoid-activated feature maps.
        """
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[i_focus : i_focus + size_conv, j_focus : j_focus + size_conv]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map])) - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            # CONSISTENCY FIX: the original called a non-existent `Expand_Mat`.
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        """Pool every feature map with an average (default) or max window of ``size_pooling``."""
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        """Flatten a list of matrices into a single 1-D array."""
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        return np.asarray(data_expanded)

    def _expand_mat(self, data_mat):
        """Flatten a single matrix into shape (1, rows * cols)."""
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        return data_mat.reshape(1, shapes[0] * shapes[1])

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        """Upsample pooled gradients back to feature-map size and multiply by the sigmoid derivative."""
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[i_pool]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        """Train with plain gradient descent until ``n_repeat`` epochs or ``mse < error_accuracy``; returns the final mse."""
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            # matplotlib is imported lazily so training without plotting
            # does not require the dependency.
            from matplotlib import pyplot as plt

            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        """Run the forward pass on every test image; returns rounded outputs as an array."""
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)
            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        """Return the convolved feature maps and their pooled version for one image."""
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
237
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the tracked deprecation warnings so each test emits its own warning."""
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    """Patch the hub client so list_metrics does not hit the network."""

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Each metric entry point should warn about the move to the `evaluate` library."""
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
59
0
import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Return the list of GIF URLs returned by the Giphy search API for ``query``."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    # NOTE(review): no error handling — a bad API key or network failure raises here.
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
43
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional

import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm

import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy


logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"


@dataclasses.dataclass
class STModelArguments:
    """Model configuration for self-training."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Data files and task description for self-training."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Training-loop configuration for self-training."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )


def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Join inference inputs with model predictions, filter them, and write the pseudo-labeled train file."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"""train_pseudo.{args.data_file_extension}""")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)


def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Run the iterative self-training loop: fine-tune, pseudo-label, optionally re-fine-tune, with early stopping."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"""{args.output_dir}/self-train_iter-{{}}""".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"""eval_results_iter-{iteration}.json"""))
            if os.path.exists(test_results_file):
                # BUG FIX: the test-results snapshot previously copied the eval
                # results file instead of the test results file.
                shutil.copy(test_results_file, os.path.join(output_dir, f"""test_results_iter-{iteration}.json"""))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"""train_pseudo.{args.data_file_extension}""")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"""eval_results_iter-{iteration}.json"""),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json"""),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
59
0
"""Image processor for CLIP-style models.

Pipeline: optional RGB conversion -> resize (shortest edge) -> center crop
-> rescale -> normalize -> channel-dimension formatting.
"""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


_lowercase = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class __SCREAMING_SNAKE_CASE(BaseImageProcessor):
    """Constructs an image processor with the standard CLIP preprocessing pipeline.

    Args:
        do_resize: Whether to resize the shortest edge to ``size["shortest_edge"]``.
        size: Target size dict; defaults to ``{"shortest_edge": 224}``.
        resample: PIL resampling filter used for resizing.
        do_center_crop: Whether to center-crop to ``crop_size``.
        crop_size: Crop size dict; defaults to ``{"height": 224, "width": 224}``.
        do_rescale: Whether to multiply pixel values by ``rescale_factor``.
        rescale_factor: Scale factor applied when rescaling (default 1/255).
        do_normalize: Whether to normalize with ``image_mean`` / ``image_std``.
        image_mean: Per-channel mean; defaults to ``OPENAI_CLIP_MEAN``.
        image_std: Per-channel std; defaults to ``OPENAI_CLIP_STD``.
        do_convert_rgb: Whether to convert inputs to RGB first.
    """

    # NOTE(review): base-class convention suggests this attribute should be named
    # `model_input_names`; kept as `_a` to preserve the existing interface — confirm.
    _a = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        # `default_to_square=False` because `size` names a shortest edge, not a square.
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` so its shortest edge matches ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values of `image` by `scale`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the full preprocessing pipeline and return a `BatchFeature`.

        Each step can be toggled per-call; unset flags fall back to the values
        configured in `__init__`. Raises `ValueError` on invalid image types or
        when an enabled step is missing its required configuration.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
238
"""Lazy-import module definition for the XGLM model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> list of public names it exports; consumed by _LazyModule.
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only.
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
59
0
"""Lazy-import module definition for the Perceiver model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Maps submodule name -> list of public names it exports; consumed by _LazyModule.
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only.
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
225
"""Configuration class for MEGATRON-BERT models."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class UpperCAmelCase(PretrainedConfig):
    """Stores the configuration of a MEGATRON-BERT model.

    Defaults reproduce a standard Megatron-BERT architecture (29056-token
    vocabulary, 24 layers, hidden size 1024, 16 attention heads).
    """

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        # pad_token_id is forwarded so the base class records it as a special token.
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
59
0
"""Placeholder test module gated on the onnxruntime dependency."""
from diffusers.utils.testing_utils import require_onnxruntime


@require_onnxruntime
class snake_case:
    """Empty test container; only runs when onnxruntime is installed."""

    pass
98
"""Slow integration tests for the Flax UNet2DConditionModel against reference output slices."""
import gc
import unittest

from parameterized import parameterized

from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp


@slow
@require_flax
class UpperCAmelCase(unittest.TestCase):
    """Checks Flax UNet outputs (bf16) against reference slices produced by torch fp16."""

    def get_file_format(self, seed, shape):
        """Return the Hub filename of the reference gaussian-noise array for (seed, shape)."""
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # Release host/device memory between tests.
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        """Load a deterministic latent tensor from the Hub (bfloat16 when fp16)."""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        """Load the pretrained Flax UNet; uses the "bf16" weights revision when fp16."""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        """Load deterministic encoder hidden states from the Hub (bfloat16 when fp16)."""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 10_00, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_fp16(self, seed, timestep, expected_slice):
        """Compare a sample slice from SD v1.4 against the reference values."""
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 10_00, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_fp16(self, seed, timestep, expected_slice):
        """Compare a sample slice from SD v2 (96x96 latents, 1024-dim text states) against reference values."""
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 10_24), fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
59
0
"""Utilities for fetching, caching and importing "dynamic" (community) pipeline modules."""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    """Return every diffusers release published on PyPI, oldest first."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda r: version.Version(r))


def init_hf_modules():
    """Make HF_MODULES_CACHE an importable package and put it on sys.path (idempotent)."""
    # This function has already been executed if HF_MODULES_CACHE already is on the path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name):
    """Create (recursively) the submodule folder `name` under HF_MODULES_CACHE, with __init__.py files."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    """Return the module names imported with a single-dot relative import in `module_file`."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    """Recursively collect every file reachable through relative imports of `module_file`."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports


def check_imports(filename):
    """Raise ImportError if `filename` imports packages missing from the environment; return its relative imports."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )
    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    """Import `module_path` and return `class_name` from it (or auto-detect the pipeline class when None)."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """Return the unique DiffusionPipeline subclass defined in `loaded_module` outside diffusers itself."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls
    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    """Download (from a local path, GitHub community pipelines, or the Hub) and cache `module_file`.

    Returns the path of the cached module inside the dynamic-modules cache,
    relative to HF_MODULES_CACHE. Raises EnvironmentError when the file cannot
    be located.
    """
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                # NOTE(review): the GitHub raw URL needs no auth token — confirm against upstream.
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Fetch/cache `module_file` and return `class_name` from it (auto-detecting the pipeline class when None)."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
91
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER over *result* and write the metrics (and, optionally,
    every prediction/target pair) to text files named after the dataset."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Lower-case *text* and strip punctuation/whitespace artifacts so that the
    references match the character set the model was trained on."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    # NOTE(review): the obfuscated source showed two single spaces here; the
    # double-space-then-single ordering below matches the apparent intent of
    # collapsing whitespace runs — confirm against the upstream script.
    token_sequences_to_ignore = ["\n\n", "\n", "  ", " "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    """Run ASR inference over the requested dataset split and log WER/CER."""
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        # BUG FIX: the computed default device must be written back to
        # args.device; previously it was assigned to a throwaway variable,
        # so the pipeline received device=None.
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
59
0
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class A__ ( A_): def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): with open(snake_case__ , encoding='utf-8' ) as input_file: __lowerCAmelCase : Dict = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' ) __lowerCAmelCase : Union[str, Any] = input_file.read() __lowerCAmelCase : str = regexp.search(snake_case__ ) return match def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): with open(snake_case__ , encoding='utf-8' ) as input_file: __lowerCAmelCase : List[str] = re.compile(R'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL ) __lowerCAmelCase : int = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` __lowerCAmelCase : List[Any] = regexp.finditer(snake_case__ ) __lowerCAmelCase : int = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = Path('./datasets' ) __lowerCAmelCase : Union[str, Any] = list(dataset_paths.absolute().glob('**/*.py' ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(snake_case__ ) ): raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}" ) def __lowerCamelCase ( self ): __lowerCAmelCase : List[str] = Path('./datasets' ) __lowerCAmelCase : Optional[int] = list(dataset_paths.absolute().glob('**/*.py' ) ) for dataset in dataset_files: if self._no_print_statements(str(snake_case__ ) ): raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead." )
86
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class UpperCAmelCase ( A_ ): A__ : jnp.ndarray @flax_register_to_config class UpperCAmelCase ( nn.Module ,A_ ,A_ ): A__ : int = 32 A__ : int = 4 A__ : int = 4 A__ : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) A__ : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") A__ : Union[bool, Tuple[bool]] = False A__ : Tuple[int] = (3_20, 6_40, 12_80, 12_80) A__ : int = 2 A__ : Union[int, Tuple[int]] = 8 A__ : Optional[Union[int, Tuple[int]]] = None A__ : int = 12_80 A__ : float = 0.0 A__ : bool = False A__ : jnp.dtype = jnp.floataa A__ : bool = True A__ : int = 0 A__ : bool = False def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : jax.random.KeyArray ) -> FrozenDict: '''simple docstring''' snake_case : Dict = (1, self.in_channels, self.sample_size, self.sample_size) snake_case : Any = jnp.zeros(snake_case__ , dtype=jnp.floataa ) snake_case : List[str] = jnp.ones((1,) , dtype=jnp.intaa ) snake_case : str = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) snake_case , snake_case : Optional[int] = jax.random.split(snake_case__ ) snake_case : Union[str, Any] = {"params": params_rng, "dropout": dropout_rng} return self.init(snake_case__ , snake_case__ , snake_case__ , snake_case__ )["params"] def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple: '''simple docstring''' snake_case : str = self.block_out_channels 
snake_case : Optional[Any] = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. snake_case : Tuple = self.num_attention_heads or self.attention_head_dim # input snake_case : Tuple = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time snake_case : Union[str, Any] = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) snake_case : Dict = FlaxTimestepEmbedding(snake_case__ , dtype=self.dtype ) snake_case : List[str] = self.only_cross_attention if isinstance(snake_case__ , snake_case__ ): snake_case : List[Any] = (only_cross_attention,) * len(self.down_block_types ) if isinstance(snake_case__ , snake_case__ ): snake_case : List[Any] = (num_attention_heads,) * len(self.down_block_types ) # down snake_case : List[Any] = [] snake_case : Optional[int] = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): snake_case : List[Any] = output_channel snake_case : Dict = block_out_channels[i] snake_case : 
Optional[Any] = i == len(snake_case__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": snake_case : List[Any] = FlaxCrossAttnDownBlockaD( in_channels=snake_case__ , out_channels=snake_case__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: snake_case : Union[str, Any] = FlaxDownBlockaD( in_channels=snake_case__ , out_channels=snake_case__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(snake_case__ ) snake_case : Dict = down_blocks # mid snake_case : Optional[int] = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up snake_case : Optional[Any] = [] snake_case : Optional[int] = list(reversed(snake_case__ ) ) snake_case : Dict = list(reversed(snake_case__ ) ) snake_case : Tuple = list(reversed(snake_case__ ) ) snake_case : Optional[Any] = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): snake_case : Optional[int] = output_channel snake_case : List[Any] = reversed_block_out_channels[i] snake_case : Union[str, Any] = reversed_block_out_channels[min(i + 1 , len(snake_case__ ) - 1 )] snake_case : int = i == len(snake_case__ ) - 1 if up_block_type == "CrossAttnUpBlock2D": snake_case : Any = FlaxCrossAttnUpBlockaD( in_channels=snake_case__ , out_channels=snake_case__ , prev_output_channel=snake_case__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , 
dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: snake_case : Optional[int] = FlaxUpBlockaD( in_channels=snake_case__ , out_channels=snake_case__ , prev_output_channel=snake_case__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(snake_case__ ) snake_case : Optional[int] = output_channel snake_case : Tuple = up_blocks # out snake_case : Optional[int] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) snake_case : List[str] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__(self : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : bool = True , snake_case__ : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]: '''simple docstring''' if not isinstance(snake_case__ , jnp.ndarray ): snake_case : List[Any] = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(snake_case__ , jnp.ndarray ) and len(timesteps.shape ) == 0: snake_case : Any = timesteps.astype(dtype=jnp.floataa ) snake_case : int = jnp.expand_dims(snake_case__ , 0 ) snake_case : str = self.time_proj(snake_case__ ) snake_case : str = self.time_embedding(snake_case__ ) # 2. pre-process snake_case : int = jnp.transpose(snake_case__ , (0, 2, 3, 1) ) snake_case : List[Any] = self.conv_in(snake_case__ ) # 3. 
down snake_case : Optional[int] = (sample,) for down_block in self.down_blocks: if isinstance(snake_case__ , snake_case__ ): snake_case , snake_case : List[Any] = down_block(snake_case__ , snake_case__ , snake_case__ , deterministic=not train ) else: snake_case , snake_case : str = down_block(snake_case__ , snake_case__ , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: snake_case : Tuple = () for down_block_res_sample, down_block_additional_residual in zip( snake_case__ , snake_case__ ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) snake_case : Optional[int] = new_down_block_res_samples # 4. mid snake_case : Optional[int] = self.mid_block(snake_case__ , snake_case__ , snake_case__ , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: snake_case : int = down_block_res_samples[-(self.layers_per_block + 1) :] snake_case : Optional[Any] = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(snake_case__ , snake_case__ ): snake_case : Optional[Any] = up_block( snake_case__ , temb=snake_case__ , encoder_hidden_states=snake_case__ , res_hidden_states_tuple=snake_case__ , deterministic=not train , ) else: snake_case : Dict = up_block(snake_case__ , temb=snake_case__ , res_hidden_states_tuple=snake_case__ , deterministic=not train ) # 6. post-process snake_case : List[str] = self.conv_norm_out(snake_case__ ) snake_case : Any = nn.silu(snake_case__ ) snake_case : Optional[int] = self.conv_out(snake_case__ ) snake_case : Union[str, Any] = jnp.transpose(snake_case__ , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=snake_case__ )
59
0
"""Fisher-Yates shuffle (random-transposition variant)."""
import random
from typing import Any, List


def fisher_yates_shuffle(data: list) -> List[Any]:
    """Shuffle *data* in place by random pairwise swaps and return it.

    Note: this performs one random transposition per element (not the
    textbook decreasing-range Fisher-Yates), matching the original intent.
    """
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        # BUG FIX: the swap tuple was previously assigned to a throwaway
        # variable, so the list was never actually modified.
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
1
# Each entry maps an energy unit name to its value expressed in joules.
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert *value* from unit *from_type* to unit *to_type*.

    Conversion goes through joules: value * J_per(from) / J_per(to).

    Raises:
        ValueError: if either unit name is not a key of ENERGY_CONVERSION.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
59
0
def actual_power(a: int, b: int) -> int:
    """Return a**b for a non-negative integer exponent *b* using
    exponentiation by squaring.

    The half-power is computed once and reused, giving O(log b)
    multiplications (the original recomputed it twice per level, O(b)).
    """
    if b == 0:
        return 1
    half = actual_power(a, b // 2)
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Return a**b for any integer exponent, negative exponents included.

    >>> power(2, 3)
    8
    >>> power(-2, -3)
    -0.125
    """
    if b < 0:
        # Delegate with the positive exponent; the original passed the
        # negative b straight through and only worked because int()
        # truncates toward zero.
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
19
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = None , ): snake_case : int = {} if train_file is not None: snake_case : List[Any] = [train_file] if eval_file is not None: snake_case : Optional[int] = [eval_file] if test_file is not None: snake_case : Any = [test_file] snake_case : int = datasets.load_dataset("csv" , data_files=__lowerCamelCase ) snake_case : str = list(ds[list(files.keys() )[0]].features.keys() ) snake_case : int = features_name.pop(__lowerCamelCase ) snake_case : str = list(set(ds[list(files.keys() )[0]][label_name] ) ) snake_case : str = {label: i for i, label in enumerate(__lowerCamelCase )} snake_case : List[Any] = tokenizer.model_input_names snake_case : List[Any] = {} if len(__lowerCamelCase ) == 1: for k in files.keys(): snake_case : Tuple = ds[k].map( lambda __lowerCamelCase : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=__lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" ) , batched=__lowerCamelCase , ) elif len(__lowerCamelCase ) == 2: for k in files.keys(): snake_case : List[Any] = ds[k].map( lambda __lowerCamelCase : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=__lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" , ) , batched=__lowerCamelCase , ) def gen_train(): for ex in 
transformed_ds[datasets.Split.TRAIN]: snake_case : Dict = {k: v for k, v in ex.items() if k in input_names} snake_case : Union[str, Any] = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: snake_case : str = {k: v for k, v in ex.items() if k in input_names} snake_case : Any = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: snake_case : str = {k: v for k, v in ex.items() if k in input_names} snake_case : List[str] = labelaid[ex[label_name]] yield (d, label) snake_case : int = ( tf.data.Dataset.from_generator( __lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: snake_case : Optional[Any] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) snake_case : Tuple = ( tf.data.Dataset.from_generator( __lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: snake_case : List[str] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) snake_case : Optional[int] = ( tf.data.Dataset.from_generator( __lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: snake_case : str = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid __lowerCamelCase = logging.getLogger(__name__) @dataclass class UpperCAmelCase : A__ : int = field(metadata={"help": "Which column contains the label"} ) A__ : str = field(default=A_ 
,metadata={"help": "The path of the training file"} ) A__ : Optional[str] = field(default=A_ ,metadata={"help": "The path of the development file"} ) A__ : Optional[str] = field(default=A_ ,metadata={"help": "The path of the test file"} ) A__ : int = field( default=1_28 ,metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } ,) A__ : bool = field( default=A_ ,metadata={"help": "Overwrite the cached training and evaluation sets"} ) @dataclass class UpperCAmelCase : A__ : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) A__ : Optional[str] = field( default=A_ ,metadata={"help": "Pretrained config name or path if not the same as model_name"} ) A__ : Optional[str] = field( default=A_ ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) A__ : bool = field(default=A_ ,metadata={"help": "Set this flag to use fast tokenization."} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. A__ : Optional[str] = field( default=A_ ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,) def UpperCamelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) snake_case , snake_case , snake_case : int = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. 
Use""" " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """ f"""16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. snake_case : Tuple = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case , snake_case , snake_case , snake_case : Tuple = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) snake_case : Optional[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__lowerCamelCase ) , labelaid=__lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): snake_case : int = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , ) def compute_metrics(__lowerCamelCase : EvalPrediction ) -> Dict: snake_case : Optional[int] = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer snake_case : int = TFTrainer( model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=__lowerCamelCase , 
eval_dataset=__lowerCamelCase , compute_metrics=__lowerCamelCase , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation snake_case : int = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) snake_case : Any = trainer.evaluate() snake_case : List[Any] = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(__lowerCamelCase , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(f""" {key} = {value}""" ) writer.write(f"""{key} = {value}\n""" ) results.update(__lowerCamelCase ) return results if __name__ == "__main__": main()
59
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: submodule name -> list of public symbols it exports.
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # BUG FIX: these conditional exports must be registered in
    # _import_structure; they were previously assigned to throwaway names.
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is
    # replaced by a _LazyModule proxy (else branch below).
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    # Defer the heavy submodule imports until an attribute is first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
15
"""Unit tests for VisionTextDualEncoderProcessor (tokenizer + image-processor composition)."""

import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    """Checks that the processor delegates correctly to its tokenizer and image processor
    and that save_pretrained/from_pretrained round-trips both components."""

    def setUp(self):
        # Throwaway directory holding a tiny WordPiece vocab and an
        # image-processor config so components can be saved/loaded from disk.
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list containing a single random 30x400 RGB PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        # channels-first -> channels-last for PIL
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
59
0
"""Convert an original fairseq/unilm WavLM checkpoint to the Hugging Face format.

Usage notes (checkpoint must come from the unilm repo):
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create simlink
"""

import argparse

import torch

# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig

from transformers import WavLMConfig, WavLMModel, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter name (substring) -> HF parameter path.  "*" is replaced by
# the encoder layer index extracted from the fairseq name.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}

TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the parameter reached by walking the dotted `key` path
    of `hf_pointer`, after checking the shapes match.

    `weight_type` selects which tensor of the target module to write
    ("weight", "weight_g", "weight_v", "bias", or None for the module itself);
    `full_name` is the original fairseq name, used only for messages.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    """Copy every weight of `fairseq_model` into `hf_model` via MAPPING;
    unmatched fairseq parameters are collected and logged as a warning."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # layer index sits just before the matched key in the fairseq name
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one feature-extractor conv-layer tensor.  type_id 0 -> conv weight/bias,
    type_id 2 -> layer/group norm weight/bias; anything else is reported unused."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Load the original WavLM checkpoint, transfer its weights into a fresh
    HF WavLMModel, and save the result to `pytorch_dump_folder_path`."""
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
115
# Lazy-import `__init__` for the BioGPT model family: configuration and
# tokenizer are always exposed; torch-backed modeling classes are registered
# only when torch is available.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Map of submodule name -> public symbols it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module is used.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with the lazy proxy so attribute access
    # triggers on-demand submodule imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
59
0
"""Integration tests for accelerate's FSDP plugin: env-var driven plugin
configuration (single process) and slow multi-GPU launcher runs."""

import inspect
import os

import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed

import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
    AccelerateTestCase,
    TempDirTestCase,
    execute_subprocess_async,
    require_cuda,
    require_fsdp,
    require_multi_gpu,
    slow,
)
from accelerate.utils.constants import (
    FSDP_AUTO_WRAP_POLICY,
    FSDP_BACKWARD_PREFETCH,
    FSDP_SHARDING_STRATEGY,
    FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment


set_seed(42)

BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]


@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    """Checks that FSDP_* environment variables are translated into the
    corresponding FullyShardedDataParallelPlugin settings."""

    def setUp(self):
        super().setUp()
        # Minimal single-process distributed environment; each test layers
        # its FSDP_* variables on a copy of this dict.
        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            # strategies are 1-indexed in the ShardingStrategy enum
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))

    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)

    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        # asking to wrap a transformer layer class the model does not have must fail
        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        # a min-param threshold of 0 disables size-based wrapping
        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)

    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    # bf16 needs no gradient scaling
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)

    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))


@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    """Launches the external FSDP training scripts via `accelerate launch`
    and checks performance / checkpointing / peak-memory bounds."""

    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])

    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                # reset the tail of the command for each state-dict type
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                # second run resumes from the checkpoint written by the first
                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
237
import collections
import inspect
import unittest
from typing import Dict, List, Tuple

from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MaskFormerSwinBackbone
    from transformers.models.maskformer import MaskFormerSwinModel


class MaskFormerSwinModelTester:
    """Builds tiny MaskFormerSwin configs/inputs and the shared checks used by the test classes below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny random batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            # NOTE(review): `path_norm` (not `patch_norm`) is what the original source
            # passed — looks like an upstream typo; kept to preserve behavior. TODO confirm.
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward a batch through MaskFormerSwinModel and check the output shape."""
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Check feature-map shapes, channel counts, and the out_features validation of the backbone."""
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError on an out_features entry that is not a stage
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Intentionally a no-op: the common-properties check does not apply here.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    # NOTE(review): the original method names of the skipped tests below could not be
    # recovered from the corrupted source — reconstructed from context; TODO confirm upstream.
    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_to_base(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        """Forward once and verify the number and (h*w, embed_dim) shape of hidden states."""
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load(self):
        pass

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            # Replace NaNs in place so allclose comparisons below are meaningful.
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})


@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overriding as returned hidden states are tuples of tensors instead of a single tensor
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
59
0
from manim import *


# NOTE(review): the base class was an undefined placeholder (`A_`); restored to manim's
# `Scene`, whose `construct` hook this class overrides. The class name itself also looks
# like a placeholder but is kept unchanged so any external reference still resolves.
class lowerCamelCase_(Scene):
    """Animated diagram: an empty model skeleton being loaded into CPU memory."""

    def construct(self):
        # Basic memory-cell rectangles used to build the CPU / GPU / Model groups.
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])

        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            # A small filled marker that shrinks and flies from the model into the CPU column.
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            # NOTE(review): the `direction=` constants below were lost in the corrupted
            # source; UP/RIGHT reconstructed from the layout — TODO confirm visually.
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
43
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37), and masks.

    Builds per-residue-type lookup tables mapping between the compact atom14
    representation and the full atom37 representation, gathers them by the
    protein's `aatype`, and stores the index maps and existence masks back into
    `protein` under the canonical OpenFold keys.
    """
    restype_atom14_to_atom37 = []
    restype_atom37_to_atom14 = []
    restype_atom14_mask = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14)
    restype_atom37_to_atom14.append([0] * 37)
    restype_atom14_mask.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    """Numpy wrapper around `make_atom14_masks`: tensorize, transform, convert back."""
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
59
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _lowercase : Tuple = { "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Tuple = [ "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST", "FalconForCausalLM", "FalconModel", "FalconPreTrainedModel", "FalconForSequenceClassification", "FalconForTokenClassification", "FalconForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys _lowercase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
238
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) LXMERT tokenizer — a BERT-style WordPiece tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the serialized state disagrees with the
        # options requested here (lowercasing, accent stripping, Chinese-char handling).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` from one or two sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first segment (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
59
0
# NOTE(review): this test file has been mechanically renamed.  All parameters
# were collapsed onto `_lowerCAmelCase` (duplicate argument names are a
# SyntaxError), all locals onto `SCREAMING_SNAKE_CASE_`, so method bodies
# reference names (`parent`, `batch_size`, `config_and_inputs`, `attentions`,
# `XLMModelTester`, ...) that are never bound, and the two test classes below
# both shadow the tester's name `lowerCamelCase_` (with a duplicated base
# `A_`).  Code is kept byte-for-byte; restore distinct names before running.
import unittest

from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        XLMForMultipleChoice,
        XLMForQuestionAnswering,
        XLMForQuestionAnsweringSimple,
        XLMForSequenceClassification,
        XLMForTokenClassification,
        XLMModel,
        XLMWithLMHeadModel,
    )
    from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST


class lowerCamelCase_:
    """Builds tiny XLM configs and random inputs for the model tests below."""

    def __init__( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=13 , _lowerCAmelCase : List[str]=7 , _lowerCAmelCase : Dict=True , _lowerCAmelCase : str=True , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : List[Any]=99 , _lowerCAmelCase : List[Any]=0 , _lowerCAmelCase : List[Any]=32 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : int=4 , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : Optional[Any]=512 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : Union[str, Any]=4 , _lowerCAmelCase : int="last" , _lowerCAmelCase : str=True , _lowerCAmelCase : Dict=None , _lowerCAmelCase : int=0 , ):
        # NOTE(review): every assignment below targets the same local name; the
        # right-hand names were the intended parameter names.
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = batch_size
        SCREAMING_SNAKE_CASE_ = seq_length
        SCREAMING_SNAKE_CASE_ = is_training
        SCREAMING_SNAKE_CASE_ = use_input_lengths
        SCREAMING_SNAKE_CASE_ = use_token_type_ids
        SCREAMING_SNAKE_CASE_ = use_labels
        SCREAMING_SNAKE_CASE_ = gelu_activation
        SCREAMING_SNAKE_CASE_ = sinusoidal_embeddings
        SCREAMING_SNAKE_CASE_ = causal
        SCREAMING_SNAKE_CASE_ = asm
        SCREAMING_SNAKE_CASE_ = n_langs
        SCREAMING_SNAKE_CASE_ = vocab_size
        SCREAMING_SNAKE_CASE_ = n_special
        SCREAMING_SNAKE_CASE_ = hidden_size
        SCREAMING_SNAKE_CASE_ = num_hidden_layers
        SCREAMING_SNAKE_CASE_ = num_attention_heads
        SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
        SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_ = max_position_embeddings
        SCREAMING_SNAKE_CASE_ = type_sequence_label_size
        SCREAMING_SNAKE_CASE_ = initializer_range
        SCREAMING_SNAKE_CASE_ = num_labels
        SCREAMING_SNAKE_CASE_ = num_choices
        SCREAMING_SNAKE_CASE_ = summary_type
        SCREAMING_SNAKE_CASE_ = use_proj
        SCREAMING_SNAKE_CASE_ = scope
        SCREAMING_SNAKE_CASE_ = bos_token_id

    def lowerCAmelCase_ ( self : Optional[Any] ):
        # Random input ids / masks / labels sized from the tester's dimensions.
        SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )

        SCREAMING_SNAKE_CASE_ = None
        if self.use_input_lengths:
            SCREAMING_SNAKE_CASE_ = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length

        SCREAMING_SNAKE_CASE_ = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )

        SCREAMING_SNAKE_CASE_ = None
        SCREAMING_SNAKE_CASE_ = None
        SCREAMING_SNAKE_CASE_ = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , 2 ).float()
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices )

        SCREAMING_SNAKE_CASE_ = self.get_config()

        # NOTE(review): these return names were never bound above.
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def lowerCAmelCase_ ( self : List[str] ):
        # Tiny XLMConfig mirroring the tester's dimensions.
        return XLMConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )

    def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , ):
        # Forward XLMModel with/without lengths+langs; check output shape.
        SCREAMING_SNAKE_CASE_ = XLMModel(config=snake_case__ )
        model.to(snake_case__ )
        model.eval()
        SCREAMING_SNAKE_CASE_ = model(snake_case__ , lengths=snake_case__ , langs=snake_case__ )
        SCREAMING_SNAKE_CASE_ = model(snake_case__ , langs=snake_case__ )
        SCREAMING_SNAKE_CASE_ = model(snake_case__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] , ):
        # LM head: loss is a scalar, logits are (batch, seq, vocab).
        SCREAMING_SNAKE_CASE_ = XLMWithLMHeadModel(snake_case__ )
        model.to(snake_case__ )
        model.eval()

        SCREAMING_SNAKE_CASE_ = model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : int , ):
        # Simple (span) QA head: start/end logits are (batch, seq).
        SCREAMING_SNAKE_CASE_ = XLMForQuestionAnsweringSimple(snake_case__ )
        model.to(snake_case__ )
        model.eval()

        SCREAMING_SNAKE_CASE_ = model(snake_case__ )

        SCREAMING_SNAKE_CASE_ = model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )

        SCREAMING_SNAKE_CASE_ = outputs
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def lowerCAmelCase_ ( self : int , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , ):
        # Beam-search QA head: checks top-k start/end log-prob and index shapes.
        SCREAMING_SNAKE_CASE_ = XLMForQuestionAnswering(snake_case__ )
        model.to(snake_case__ )
        model.eval()

        SCREAMING_SNAKE_CASE_ = model(snake_case__ )

        SCREAMING_SNAKE_CASE_ = model(
            snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , )

        SCREAMING_SNAKE_CASE_ = model(
            snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , )

        # NOTE(review): originally a 1-tuple unpacking, e.g. `(total_loss,) = ...`.
        (SCREAMING_SNAKE_CASE_ ) = result_with_labels.to_tuple()

        SCREAMING_SNAKE_CASE_ = model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )

        (SCREAMING_SNAKE_CASE_ ) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )

    def lowerCAmelCase_ ( self : int , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : Dict , ):
        # Sequence classification: scalar loss, (batch, num_labels) logits.
        SCREAMING_SNAKE_CASE_ = XLMForSequenceClassification(snake_case__ )
        model.to(snake_case__ )
        model.eval()

        SCREAMING_SNAKE_CASE_ = model(snake_case__ )
        SCREAMING_SNAKE_CASE_ = model(snake_case__ , labels=snake_case__ )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , ):
        # Token classification: logits are (batch, seq, num_labels).
        SCREAMING_SNAKE_CASE_ = self.num_labels
        SCREAMING_SNAKE_CASE_ = XLMForTokenClassification(snake_case__ )
        model.to(snake_case__ )
        model.eval()

        SCREAMING_SNAKE_CASE_ = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , ):
        # Multiple choice: inputs are tiled to (batch, num_choices, seq).
        SCREAMING_SNAKE_CASE_ = self.num_choices
        SCREAMING_SNAKE_CASE_ = XLMForMultipleChoice(config=snake_case__ )
        model.to(snake_case__ )
        model.eval()
        SCREAMING_SNAKE_CASE_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        SCREAMING_SNAKE_CASE_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        SCREAMING_SNAKE_CASE_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        SCREAMING_SNAKE_CASE_ = model(
            snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def lowerCAmelCase_ ( self : Optional[Any] ):
        SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        # NOTE(review): originally a 9-way tuple unpacking of the tester inputs.
        (SCREAMING_SNAKE_CASE_ ) = config_and_inputs
        SCREAMING_SNAKE_CASE_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict


@require_torch
class lowerCamelCase_ ( A_ , A_ , A_ , unittest.TestCase ):
    """Common + generation + pipeline test suite for the XLM model family."""

    lowercase_ = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    lowercase_ = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    lowercase_ = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] ):
        # Skip QA pipeline tests with slow tokenizers (known failures).
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast' )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : str=False ):
        # XLMForQuestionAnswering needs extra zeroed label tensors.
        SCREAMING_SNAKE_CASE_ = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                SCREAMING_SNAKE_CASE_ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
                SCREAMING_SNAKE_CASE_ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )

        return inputs_dict

    def lowerCAmelCase_ ( self : Dict ):
        SCREAMING_SNAKE_CASE_ = XLMModelTester(self )
        SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=snake_case__ , emb_dim=37 )

    def lowerCAmelCase_ ( self : Dict ):
        self.config_tester.run_common_tests()

    def lowerCAmelCase_ ( self : int ):
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*snake_case__ )

    def lowerCAmelCase_ ( self : Tuple ):
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*snake_case__ )

    def lowerCAmelCase_ ( self : str ):
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*snake_case__ )

    def lowerCAmelCase_ ( self : List[str] ):
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*snake_case__ )

    def lowerCAmelCase_ ( self : List[Any] ):
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*snake_case__ )

    def lowerCAmelCase_ ( self : List[Any] ):
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*snake_case__ )

    def lowerCAmelCase_ ( self : List[str] ):
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*snake_case__ )

    def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : int=1 ):
        # Generation hook: verify per-step attention shapes (one extra PAD
        # token is appended per decoding step).
        self.assertIsInstance(snake_case__ , snake_case__ )
        self.assertListEqual(
            [isinstance(snake_case__ , snake_case__ ) for iter_attentions in attentions] , [True] * len(snake_case__ ) )
        self.assertEqual(len(snake_case__ ) , (max_length - min_length) * num_beam_groups )

        for idx, iter_attentions in enumerate(snake_case__ ):
            # adds PAD dummy token
            SCREAMING_SNAKE_CASE_ = min_length + idx + 1
            SCREAMING_SNAKE_CASE_ = min_length + idx + 1

            SCREAMING_SNAKE_CASE_ = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(snake_case__ ) )

    def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : str=1 ):
        # Generation hook: verify per-step hidden-state shapes.
        self.assertIsInstance(snake_case__ , snake_case__ )
        self.assertListEqual(
            [isinstance(snake_case__ , snake_case__ ) for iter_hidden_states in hidden_states] , [True] * len(snake_case__ ) , )
        self.assertEqual(len(snake_case__ ) , (max_length - min_length) * num_beam_groups )

        for idx, iter_hidden_states in enumerate(snake_case__ ):
            # adds PAD dummy token
            SCREAMING_SNAKE_CASE_ = min_length + idx + 1
            SCREAMING_SNAKE_CASE_ = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(snake_case__ ) , )

    pass

    @slow
    def lowerCAmelCase_ ( self : Optional[int] ):
        # Smoke-test loading the first published checkpoint.
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE_ = XLMModel.from_pretrained(snake_case__ )
            self.assertIsNotNone(snake_case__ )


@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
    """Integration test: greedy generation with the real xlm-mlm-en-2048 checkpoint."""

    @slow
    def lowerCAmelCase_ ( self : Optional[Any] ):
        SCREAMING_SNAKE_CASE_ = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
        model.to(snake_case__ )

        SCREAMING_SNAKE_CASE_ = torch.tensor([[14, 447]] , dtype=torch.long , device=snake_case__ )  # the president
        SCREAMING_SNAKE_CASE_ = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        SCREAMING_SNAKE_CASE_ = model.generate(snake_case__ , do_sample=snake_case__ )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , snake_case__ )
225
# NOTE(review): mechanically renamed test file.  The base class `A_` is
# undefined (was `SchedulerCommonTest`), every local is `snake_case`, and
# bodies reference never-bound names (`config`, `scheduler_class`,
# `scheduler`, `model`, `sample`, `result_sum`, ...).  Statements of the form
# `snake_case , snake_case : Any = 10, 0.0` are SyntaxErrors (annotation on a
# tuple target).  Code kept byte-for-byte; restore distinct names before use.
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class UpperCAmelCase ( A_ ):
    """Test suite for DDIMParallelScheduler (config sweeps, variance values,
    batched no-noise stepping, and full denoising loops)."""

    A__ : Dict = (DDIMParallelScheduler,)
    A__ : Tuple = (("eta", 0.0), ("num_inference_steps", 50))

    def _SCREAMING_SNAKE_CASE (self : Tuple , **snake_case__ : Optional[int] ) -> Optional[Any]:
        # Default scheduler config; kwargs override individual fields.
        snake_case : Any = {
            "num_train_timesteps": 10_00,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**snake_case__ )
        return config

    def _SCREAMING_SNAKE_CASE (self : Dict , **snake_case__ : Optional[int] ) -> Any:
        # Run a full 10-step denoising loop and return the final sample.
        snake_case : List[Any] = self.scheduler_classes[0]
        snake_case : Any = self.get_scheduler_config(**snake_case__ )
        snake_case : Any = scheduler_class(**snake_case__ )

        snake_case , snake_case : Union[str, Any] = 10, 0.0

        snake_case : List[Any] = self.dummy_model()
        snake_case : Any = self.dummy_sample_deter

        scheduler.set_timesteps(snake_case__ )

        for t in scheduler.timesteps:
            snake_case : Optional[int] = model(snake_case__ , snake_case__ )
            snake_case : List[str] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , snake_case__ ).prev_sample

        return sample

    def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> str:
        for timesteps in [1_00, 5_00, 10_00]:
            self.check_over_configs(num_train_timesteps=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : str ) -> int:
        # steps_offset=1 shifts the whole timestep schedule by one.
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=snake_case__ )

        snake_case : Optional[int] = self.scheduler_classes[0]
        snake_case : Optional[int] = self.get_scheduler_config(steps_offset=1 )
        snake_case : Union[str, Any] = scheduler_class(**snake_case__ )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )

    def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]:
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : str ) -> Dict:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[str]:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[Any]:
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[Any]:
        # Dynamic thresholding sweep across sample_max_value / prediction type.
        self.check_over_configs(thresholding=snake_case__ )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , )

    def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Any:
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
            self.check_over_forward(time_step=snake_case__ , num_inference_steps=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=snake_case__ , eta=snake_case__ )

    def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[int]:
        # Spot-check _get_variance against precomputed values.
        snake_case : Dict = self.scheduler_classes[0]
        snake_case : Tuple = self.get_scheduler_config()
        snake_case : Dict = scheduler_class(**snake_case__ )

        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.14771 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.32460 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.00979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5

    def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
        # batch_step_no_noise over three stacked samples at three timesteps.
        snake_case : Union[str, Any] = self.scheduler_classes[0]
        snake_case : List[Any] = self.get_scheduler_config()
        snake_case : int = scheduler_class(**snake_case__ )

        snake_case , snake_case : Any = 10, 0.0

        scheduler.set_timesteps(snake_case__ )

        snake_case : Optional[Any] = self.dummy_model()
        snake_case : str = self.dummy_sample_deter
        snake_case : Dict = self.dummy_sample_deter + 0.1
        snake_case : Dict = self.dummy_sample_deter - 0.1

        snake_case : Optional[Any] = samplea.shape[0]
        snake_case : str = torch.stack([samplea, samplea, samplea] , dim=0 )
        snake_case : Tuple = torch.arange(snake_case__ )[0:3, None].repeat(1 , snake_case__ )

        snake_case : Tuple = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        snake_case : List[str] = scheduler.batch_step_no_noise(snake_case__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , snake_case__ )

        snake_case : Dict = torch.sum(torch.abs(snake_case__ ) )
        snake_case : List[Any] = torch.mean(torch.abs(snake_case__ ) )

        assert abs(result_sum.item() - 1147.7904 ) < 1e-2
        assert abs(result_mean.item() - 0.4982 ) < 1e-3

    def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[Any]:
        # Default full loop: compare sum/mean of the final sample.
        snake_case : List[Any] = self.full_loop()
        snake_case : Optional[Any] = torch.sum(torch.abs(snake_case__ ) )
        snake_case : List[Any] = torch.mean(torch.abs(snake_case__ ) )

        assert abs(result_sum.item() - 172.0067 ) < 1e-2
        assert abs(result_mean.item() - 0.223967 ) < 1e-3

    def _SCREAMING_SNAKE_CASE (self : str ) -> Union[str, Any]:
        # v-prediction full loop.
        snake_case : Dict = self.full_loop(prediction_type="v_prediction" )
        snake_case : int = torch.sum(torch.abs(snake_case__ ) )
        snake_case : Optional[int] = torch.mean(torch.abs(snake_case__ ) )

        assert abs(result_sum.item() - 52.5302 ) < 1e-2
        assert abs(result_mean.item() - 0.0684 ) < 1e-3

    def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
        # We specify different beta, so that the first alpha is 0.99
        snake_case : Dict = self.full_loop(set_alpha_to_one=snake_case__ , beta_start=0.01 )
        snake_case : str = torch.sum(torch.abs(snake_case__ ) )
        snake_case : Optional[Any] = torch.mean(torch.abs(snake_case__ ) )

        assert abs(result_sum.item() - 149.8295 ) < 1e-2
        assert abs(result_mean.item() - 0.1951 ) < 1e-3

    def _SCREAMING_SNAKE_CASE (self : int ) -> Optional[Any]:
        # We specify different beta, so that the first alpha is 0.99
        snake_case : int = self.full_loop(set_alpha_to_one=snake_case__ , beta_start=0.01 )
        snake_case : Tuple = torch.sum(torch.abs(snake_case__ ) )
        snake_case : List[Any] = torch.mean(torch.abs(snake_case__ ) )

        assert abs(result_sum.item() - 149.0784 ) < 1e-2
        assert abs(result_mean.item() - 0.1941 ) < 1e-3
59
0
"""Prefix-sum helper: O(1) range sums and O(n) subarray-sum membership."""


class snake_case:
    """Precompute prefix sums of ``array`` for constant-time range queries.

    Fixes over the previous revision: every local/attribute assignment had
    been collapsed onto one throwaway name, so ``prefix_sum`` was never set
    and both methods shared the name ``__lowerCAmelCase`` (only the last was
    reachable).  The old mangled name is kept as an alias for compatibility.
    """

    def __init__(self, array: list[int]) -> None:
        # prefix_sum[i] == array[0] + ... + array[i]
        length = len(array)
        self.prefix_sum = [0] * length

        if length > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, length):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of ``array[start..end]`` (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]

        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True iff some contiguous subarray sums to ``target_sum``."""
        # A subarray sum equals prefix[j] - prefix[i]; track seen prefixes.
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True

            sums.add(sum_item)

        return False

    # Backward-compat alias: the previously reachable (last-defined) method
    # under the collided name was the subarray-sum test.  Name mangling makes
    # this attribute `_snake_case__lowerCAmelCase`, matching the old access path.
    __lowerCAmelCase = contains_sum


if __name__ == "__main__":
    import doctest

    doctest.testmod()
98
def encrypt(input_string: str, key: int) -> str:
    """Encrypt ``input_string`` with the rail-fence (zigzag) cipher of height ``key``.

    Raises:
        ValueError: if ``key`` is zero or negative.
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    # With one rail, or fewer characters than rails, the text is unchanged.
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    return "".join(grid)


def decrypt(input_string: str, key: int) -> str:
    """Invert :func:`encrypt` for the same ``key``.

    Raises:
        ValueError: if ``key`` is zero or negative.
    """
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    # Generate the zigzag template to learn each rail's length.
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    # Fill each rail with the corresponding ciphertext slice.
    counter = 0
    for row in temp_grid:
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    # Read the grid back in zigzag order.
    output_string = ""
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def UpperCamelCase(input_string: str) -> dict[int, str]:
    """Brute-force a rail-fence ciphertext: return ``{key: decryption}`` for
    every key from 1 to ``len(input_string) - 1``.

    Keeps the module-level name from the previous revision, where three
    colliding definitions left only this brute-forcer reachable (and it
    called an undefined ``decrypt``); ``encrypt``/``decrypt`` above restore
    the missing helpers.
    """
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
59
0
"""Video image processor: batch videos and resize/center-crop/rescale/normalize frames."""
# NOTE(review): mechanically renamed file.  The base class `A_` is undefined
# (was `BaseImageProcessor`), every method is named `_SCREAMING_SNAKE_CASE`
# (only the last definition is reachable), `__init__`/`preprocess` have
# duplicate parameter names (SyntaxError), and bodies reference never-bound
# names (`size`, `crop_size`, `videos`, `make_batched`, ...).  Code kept
# byte-for-byte; restore distinct names before use.
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

UpperCAmelCase_ : int = logging.get_logger(__name__)


def _A (__a ) -> str:
    """Coerce a single image / list of frames / list of videos into a list of videos."""
    # NOTE(review): the body references `__lowerCamelCase`/`videos`, but the
    # parameter is named `__a` — restore a single consistent name.
    if isinstance(__lowerCamelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos

    elif isinstance(__lowerCamelCase , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]

    elif is_valid_image(__lowerCamelCase ):
        return [[videos]]

    raise ValueError(f'Could not make batched video from {videos}' )


class lowerCAmelCase__ ( A_ ):
    """Per-frame preprocessing pipeline (resize, center-crop, rescale, normalize)
    producing ``pixel_values`` batches for video models."""

    __UpperCamelCase = ["pixel_values"]

    def __init__( self : Any , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : int , ):
        # Defaults: shortest-edge-224 resize, 224x224 crop, ImageNet mean/std.
        super().__init__(**snake_case__)
        SCREAMING_SNAKE_CASE_ : Dict = size if size is not None else {"shortest_edge": 224}
        SCREAMING_SNAKE_CASE_ : Optional[Any] = get_size_dict(snake_case__ , default_to_square=snake_case__)
        SCREAMING_SNAKE_CASE_ : Optional[Any] = crop_size if crop_size is not None else {"height": 224, "width": 224}
        SCREAMING_SNAKE_CASE_ : str = get_size_dict(snake_case__ , param_name='''crop_size''')

        SCREAMING_SNAKE_CASE_ : Any = do_resize
        SCREAMING_SNAKE_CASE_ : Optional[Any] = size
        SCREAMING_SNAKE_CASE_ : Tuple = do_center_crop
        SCREAMING_SNAKE_CASE_ : Dict = crop_size
        SCREAMING_SNAKE_CASE_ : Tuple = resample
        SCREAMING_SNAKE_CASE_ : Dict = do_rescale
        SCREAMING_SNAKE_CASE_ : Any = rescale_factor
        SCREAMING_SNAKE_CASE_ : Optional[int] = do_normalize
        SCREAMING_SNAKE_CASE_ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        SCREAMING_SNAKE_CASE_ : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ):
        # Resize by shortest edge or to an explicit (height, width).
        SCREAMING_SNAKE_CASE_ : Optional[int] = get_size_dict(snake_case__ , default_to_square=snake_case__)
        if "shortest_edge" in size:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_resize_output_image_size(snake_case__ , size['''shortest_edge'''] , default_to_square=snake_case__)
        elif "height" in size and "width" in size:
            SCREAMING_SNAKE_CASE_ : Optional[int] = (size["height"], size["width"])
        else:
            raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}')
        return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__)

    def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Any , ):
        # Center-crop to exactly (height, width).
        SCREAMING_SNAKE_CASE_ : Optional[Any] = get_size_dict(snake_case__)
        if "height" not in size or "width" not in size:
            raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}')
        return center_crop(snake_case__ , size=(size['''height'''], size['''width''']) , data_format=snake_case__ , **snake_case__)

    def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[str] , ):
        # Multiply pixel values by `scale` (e.g. 1/255).
        return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__)

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[str] , ):
        # Channel-wise (x - mean) / std normalization.
        return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__)

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
        # Apply the enabled transforms to a single frame.
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''')

        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')

        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')

        # All transformations expect numpy arrays.
        SCREAMING_SNAKE_CASE_ : List[Any] = to_numpy_array(snake_case__)

        if do_resize:
            SCREAMING_SNAKE_CASE_ : int = self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__)

        if do_center_crop:
            SCREAMING_SNAKE_CASE_ : List[str] = self.center_crop(snake_case__ , size=snake_case__)

        if do_rescale:
            SCREAMING_SNAKE_CASE_ : int = self.rescale(image=snake_case__ , scale=snake_case__)

        if do_normalize:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__)

        SCREAMING_SNAKE_CASE_ : List[str] = to_channel_dimension_format(snake_case__ , snake_case__)
        return image

    def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : List[Any] , ):
        # Public entry point: per-call options fall back to instance defaults,
        # every frame of every video is preprocessed, and the result is wrapped
        # in a BatchFeature under "pixel_values".
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = resample if resample is not None else self.resample
        SCREAMING_SNAKE_CASE_ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
        SCREAMING_SNAKE_CASE_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
        SCREAMING_SNAKE_CASE_ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        SCREAMING_SNAKE_CASE_ : Any = do_normalize if do_normalize is not None else self.do_normalize
        SCREAMING_SNAKE_CASE_ : Tuple = image_mean if image_mean is not None else self.image_mean
        SCREAMING_SNAKE_CASE_ : Tuple = image_std if image_std is not None else self.image_std
        SCREAMING_SNAKE_CASE_ : Dict = size if size is not None else self.size
        SCREAMING_SNAKE_CASE_ : Any = get_size_dict(snake_case__ , default_to_square=snake_case__)
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
        SCREAMING_SNAKE_CASE_ : List[Any] = get_size_dict(snake_case__ , param_name='''crop_size''')

        if not valid_images(snake_case__):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')

        SCREAMING_SNAKE_CASE_ : Tuple = make_batched(snake_case__)

        SCREAMING_SNAKE_CASE_ : Optional[Any] = [
            [
                self._preprocess_image(
                    image=snake_case__ , do_resize=snake_case__ , size=snake_case__ , resample=snake_case__ , do_center_crop=snake_case__ , crop_size=snake_case__ , do_rescale=snake_case__ , rescale_factor=snake_case__ , do_normalize=snake_case__ , image_mean=snake_case__ , image_std=snake_case__ , data_format=snake_case__ , )
                for img in video
            ]
            for video in videos
        ]

        SCREAMING_SNAKE_CASE_ : List[str] = {"pixel_values": videos}
        return BatchFeature(data=snake_case__ , tensor_type=snake_case__)
91
# Lazy-import setup for the MT5 model family.
#
# MT5 has no tokenizer of its own: it reuses the T5 tokenizers, which are
# aliased here (falling back to dummy placeholder objects when the optional
# sentencepiece / tokenizers backends are missing) and injected into the lazy
# module via ``extra_objects``.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Slow (sentencepiece-backed) tokenizer, or its dummy stand-in.
if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

MTaTokenizer = TaTokenizer

# Fast (tokenizers-backed) tokenizer, or its dummy stand-in.
if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

MTaTokenizerFast = TaTokenizerFast

# Map of submodule name -> public names it defines; consumed by _LazyModule.
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
    import sys

    # Replace this module with a lazy proxy; submodules are only imported on
    # first attribute access. The tokenizer aliases are eagerly available.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
        module_spec=__spec__,
    )
59
0
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME lowerCamelCase__ = ["""small""", """medium""", """large"""] lowerCamelCase__ = """lm_head.decoder.weight""" lowerCamelCase__ = """lm_head.weight""" def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Optional[int] = torch.load(__lowerCamelCase ) __lowerCAmelCase : Any = d.pop(__lowerCamelCase ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument("""--dialogpt_path""", default=""".""", type=str) lowerCamelCase__ = parser.parse_args() for MODEL in DIALOGPT_MODELS: lowerCamelCase__ = os.path.join(args.dialogpt_path, f'{MODEL}_ft.pkl') lowerCamelCase__ = f'./DialoGPT-{MODEL}' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
86
"""Minimal ONNX Runtime model wrapper with a Hugging Face style
``save_pretrained`` / ``from_pretrained`` API (local directories and the Hub).
"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


logger = logging.get_logger(__name__)

# Mapping from ONNX Runtime tensor-type strings to numpy dtypes.
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        """Wrap an ``ort.InferenceSession`` plus the bookkeeping needed to
        re-save it (the directory and filename it was loaded from)."""
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", None)

    def __call__(self, **kwargs):
        """Run inference; keyword arguments are the graph's named inputs."""
        # ONNX Runtime only accepts plain numpy arrays as feeds.
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Create an ``InferenceSession`` for ``path``.

        Args:
            path: Path of the ONNX model file.
            provider: Execution provider name; defaults to CPU.
            sess_options: Optional ``ort.SessionOptions``.
        """
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        """Copy the latest model file (and its external weights file, if any)
        from ``self.model_save_dir`` into ``save_directory``."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            # Saving into the directory the model already lives in is a no-op.
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        """Save the model to ``save_directory`` (created if missing)."""
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        """Load from a local directory or download from the Hugging Face Hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        """Public entry point; supports the ``repo_id@revision`` syntax."""
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )


# Backward-compatible alias for the previous (obfuscated) class name.
UpperCAmelCase = OnnxRuntimeModel
0
"""Train a masked language model on TPU using TFRecord shards."""
import argparse
import logging
import os
import re

import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)


logger = logging.getLogger(__name__)
AUTOTUNE = tf.data.AUTOTUNE


def parse_args():
    """Build and parse the command-line arguments for TPU MLM training."""
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args


def initialize_tpu(args):
    """Resolve, connect to and initialize the TPU system; return the resolver."""
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu


def count_samples(file_list):
    """Sum the per-shard sample counts encoded in the TFRecord filenames.

    Shard names are expected to look like ``...-<shard>-<num_samples>.tfrecord``.
    """
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    """Build a batched, masked ``tf.data`` pipeline over TFRecord shards."""
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        # Shuffle at the shard level first so epochs visit files in new orders.
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTOTUNE)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTOTUNE)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTOTUNE)
    dataset = dataset.prefetch(AUTOTUNE)
    return dataset


def main(args):
    """End-to-end training: strategy setup, data pipelines, model fit/save."""
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    # We build the model from config only, so the vocab size must come from
    # the tokenizer actually used to prepare the data.
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
1
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf from huggingface_hub import cached_download, hf_hub_url from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase = logging.get_logger() @dataclass class UpperCAmelCase : A__ : nn.Module A__ : List[nn.Module] = field(default_factory=A_ ) A__ : list = field(default_factory=A_ ) def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Tensor , snake_case__ : Tensor ) -> Optional[Any]: '''simple docstring''' snake_case : List[str] = len(list(m.modules() ) ) == 1 or isinstance(snake_case__ , nn.Convad ) or isinstance(snake_case__ , nn.BatchNormad ) if has_not_submodules: self.traced.append(snake_case__ ) def __call__(self : List[Any] , snake_case__ : Tensor ) -> List[Any]: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(snake_case__ ) [x.remove() for x in self.handles] return self @property def _SCREAMING_SNAKE_CASE (self : int ) -> Optional[int]: '''simple docstring''' return list(filter(lambda snake_case__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class UpperCAmelCase : A__ : nn.Module A__ : nn.Module A__ : int = 1 A__ : List = field(default_factory=A_ ) A__ : List = field(default_factory=A_ ) A__ : bool = True def __call__(self : List[Any] , snake_case__ : Tensor ) -> Any: '''simple docstring''' snake_case : str = Tracker(self.dest )(snake_case__ ).parametrized snake_case : Optional[int] = 
Tracker(self.src )(snake_case__ ).parametrized snake_case : List[str] = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.src_skip , snake_case__ ) ) snake_case : Optional[Any] = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.dest_skip , snake_case__ ) ) if len(snake_case__ ) != len(snake_case__ ) and self.raise_if_mismatch: raise Exception( f"""Numbers of operations are different. Source module has {len(snake_case__ )} operations while""" f""" destination module has {len(snake_case__ )}.""" ) for dest_m, src_m in zip(snake_case__ , snake_case__ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f"""Transfered from={src_m} to={dest_m}""" ) class UpperCAmelCase ( nn.Module ): def __init__(self : Tuple , snake_case__ : nn.Module ) -> Optional[Any]: '''simple docstring''' super().__init__() snake_case : List[Tuple[str, nn.Module]] = [] # - get the stem feature_blocks.append(("conv1", model.stem) ) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith("block" ), f"""Unexpected layer name {k}""" snake_case : Union[str, Any] = len(snake_case__ ) + 1 feature_blocks.append((f"""res{block_index}""", v) ) snake_case : Optional[Any] = nn.ModuleDict(snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Tensor ) -> Dict: '''simple docstring''' return get_trunk_forward_outputs( snake_case__ , out_feat_keys=snake_case__ , feature_blocks=self._feature_blocks , ) class UpperCAmelCase ( A_ ): def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : str ) -> str: '''simple docstring''' snake_case : List[Any] = x.split("-" ) return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] ) def __getitem__(self : Optional[int] , snake_case__ : str ) -> Callable[[], Tuple[nn.Module, Dict]]: '''simple docstring''' if x not in self: snake_case : Dict = self.convert_name_to_timm(snake_case__ ) snake_case : Union[str, Any] = partial(lambda: (timm.create_model(snake_case__ , 
pretrained=snake_case__ ).eval(), None) ) else: snake_case : List[str] = super().__getitem__(snake_case__ ) return val class UpperCAmelCase ( A_ ): def __getitem__(self : Dict , snake_case__ : str ) -> Callable[[], nn.Module]: '''simple docstring''' if "seer" in x and "in1k" not in x: snake_case : str = RegNetModel else: snake_case : Optional[Any] = RegNetForImageClassification return val def UpperCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Tuple[str, str]] ): for from_key, to_key in keys: snake_case : str = from_state_dict[from_key].clone() print(f"""Copied key={from_key} to={to_key}""" ) return to_state_dict def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : Callable[[], nn.Module] , __lowerCamelCase : Callable[[], nn.Module] , __lowerCamelCase : RegNetConfig , __lowerCamelCase : Path , __lowerCamelCase : bool = True , ): print(f"""Converting {name}...""" ) with torch.no_grad(): snake_case , snake_case : int = from_model_func() snake_case : str = our_model_func(__lowerCamelCase ).eval() snake_case : int = ModuleTransfer(src=__lowerCamelCase , dest=__lowerCamelCase , raise_if_mismatch=__lowerCamelCase ) snake_case : Dict = torch.randn((1, 3, 224, 224) ) module_transfer(__lowerCamelCase ) if from_state_dict is not None: snake_case : str = [] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: snake_case : Tuple = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")] snake_case : Optional[Any] = manually_copy_vissl_head(__lowerCamelCase , our_model.state_dict() , __lowerCamelCase ) our_model.load_state_dict(__lowerCamelCase ) snake_case : Any = our_model(__lowerCamelCase , output_hidden_states=__lowerCamelCase ) snake_case : Union[str, Any] = ( our_outputs.logits if isinstance(__lowerCamelCase , __lowerCamelCase ) else our_outputs.last_hidden_state ) snake_case : Union[str, Any] = from_model(__lowerCamelCase ) 
snake_case : Dict = from_output[-1] if type(__lowerCamelCase ) is list else from_output # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state if "seer" in name and "in1k" in name: snake_case : Any = our_outputs.hidden_states[-1] assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), "The model logits don't match the original one." if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=__lowerCamelCase , ) snake_case : List[str] = 224 if "seer" not in name else 384 # we can use the convnext one snake_case : int = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=__lowerCamelCase ) image_processor.push_to_hub( repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=__lowerCamelCase , ) print(f"""Pushed {name}""" ) def UpperCamelCase ( __lowerCamelCase : Path , __lowerCamelCase : str = None , __lowerCamelCase : bool = True ): snake_case : Union[str, Any] = "imagenet-1k-id2label.json" snake_case : List[str] = 1000 snake_case : List[str] = (1, num_labels) snake_case : Any = "huggingface/label-files" snake_case : List[str] = num_labels snake_case : Optional[Any] = json.load(open(cached_download(hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type="dataset" ) ) , "r" ) ) snake_case : List[Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()} snake_case : str = idalabel snake_case : List[Any] = {v: k for k, v in idalabel.items()} snake_case : Dict = partial(__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase ) snake_case : Optional[Any] = { "regnet-x-002": ImageNetPreTrainedConfig( depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ), "regnet-x-004": ImageNetPreTrainedConfig( depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , 
layer_type="x" ), "regnet-x-006": ImageNetPreTrainedConfig( depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ), "regnet-x-008": ImageNetPreTrainedConfig( depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ), "regnet-x-016": ImageNetPreTrainedConfig( depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ), "regnet-x-032": ImageNetPreTrainedConfig( depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ), "regnet-x-040": ImageNetPreTrainedConfig( depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ), "regnet-x-064": ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ), "regnet-x-080": ImageNetPreTrainedConfig( depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ), "regnet-x-120": ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ), "regnet-x-160": ImageNetPreTrainedConfig( depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ), "regnet-x-320": ImageNetPreTrainedConfig( depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ), # y variant "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ), "regnet-y-004": ImageNetPreTrainedConfig( depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ), "regnet-y-006": ImageNetPreTrainedConfig( depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ), "regnet-y-008": ImageNetPreTrainedConfig( depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ), "regnet-y-016": ImageNetPreTrainedConfig( depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ), 
"regnet-y-032": ImageNetPreTrainedConfig( depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ), "regnet-y-040": ImageNetPreTrainedConfig( depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ), "regnet-y-064": ImageNetPreTrainedConfig( depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ), "regnet-y-080": ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ), "regnet-y-120": ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ), "regnet-y-160": ImageNetPreTrainedConfig( depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ), "regnet-y-320": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ), "regnet-y-1280-seer": RegNetConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ), "regnet-y-2560-seer": RegNetConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ), "regnet-y-10b-seer": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ), # finetuned on imagenet "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ), "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ), "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig( depths=[3, 
7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ), "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ), } snake_case : Union[str, Any] = NameToOurModelFuncMap() snake_case : str = NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(__lowerCamelCase : str , __lowerCamelCase : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]: snake_case : List[Any] = torch.hub.load_state_dict_from_url(__lowerCamelCase , model_dir=str(__lowerCamelCase ) , map_location="cpu" ) snake_case : Dict = model_func() # check if we have a head, if yes add it snake_case : str = files["classy_state_dict"]["base_model"]["model"] snake_case : Dict = model_state_dict["trunk"] model.load_state_dict(__lowerCamelCase ) return model.eval(), model_state_dict["heads"] # pretrained snake_case : List[Any] = partial( __lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) snake_case : Optional[int] = partial( __lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) snake_case : List[str] = partial( __lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) snake_case : Tuple = partial( __lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=620.83 , w_m=2.52 ) ) ) , ) # IN1K finetuned snake_case : List[Any] = partial( __lowerCamelCase , 
"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) snake_case : Tuple = partial( __lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) snake_case : str = partial( __lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) snake_case : Dict = partial( __lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=620.83 , w_m=2.52 ) ) ) , ) if model_name: convert_weight_and_push( __lowerCamelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , __lowerCamelCase , __lowerCamelCase , ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( __lowerCamelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) return config, expected_shape if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported regnet* architecture,""" """ currently: regnetx-*, regnety-*. 
If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) __lowerCamelCase = parser.parse_args() __lowerCamelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
59
0
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def parse_flag_from_env(key, default=False):
    """Read boolean flag *key* from the environment.

    Returns `default` when the variable is unset; otherwise converts a
    yes/no-like string to a bool and raises ValueError for anything else.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None
    or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    """Skip *test_case* unless faiss is importable."""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    """Skip *test_case* unless regex is importable."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    """Skip *test_case* unless elasticsearch is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """Skip *test_case* unless sqlalchemy is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    """Skip *test_case* unless PyTorch is available."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    """Skip *test_case* unless TensorFlow is available."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    """Skip *test_case* unless JAX is available."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    """Skip *test_case* unless Pillow is available."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    """Skip *test_case* unless transformers is importable."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """Skip *test_case* unless tiktoken is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """Skip *test_case* unless spacy is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """Skip the decorated test unless spacy and the given spacy *model* are available."""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """Skip *test_case* unless pyspark is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """Skip *test_case* unless joblibspark is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    """Skip *test_case* unless RUN_SLOW is enabled in the environment."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    """Skip *test_case* unless RUN_LOCAL is enabled in the environment."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    """Skip *test_case* unless RUN_PACKAGED is enabled in the environment."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    """Skip *test_case* unless RUN_REMOTE is enabled in the environment."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    """Class decorator applying each of *decorators* to every ``test*`` method."""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate an offline environment for the duration of the block.

    Depending on *mode*, either make every request fail immediately, time
    out quickly, or flip the HF_DATASETS_OFFLINE config flag.
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """Run the block with CWD set to a fresh temporary directory, restoring it afterwards."""
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    """Assert that the block strictly increases Arrow's allocated memory."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    """Assert that the block does not increase Arrow's allocated memory."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    """Return True if two numpy bit generators produce the same next draws (both are deepcopied)."""
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    """Decorator turning HTTP 500/502 errors raised by *func* into pytest xfails."""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    """Container for the result of an async subprocess run."""

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    # Forward each line of an asyncio stream to *callback* until EOF.
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """Run *cmd* asynchronously, teeing its stdout/stderr line by line."""
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Run *cmd* to completion and return its _RunOutput.

    Raises RuntimeError when the subprocess exits non-zero or produces no
    output at all (the latter guards tests that rely on remote-side checks).
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """Return the numeric id of the current pytest-xdist worker (0 when not under xdist)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a torch.distributed port unique per pytest-xdist worker."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
19
import warnings
from typing import Dict

import numpy as np

from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


def sigmoid(_outputs):
    """Element-wise logistic sigmoid of a numpy array."""
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    """Numerically stable softmax over the last axis of a numpy array."""
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    """
    Text classification pipeline using any `ModelForSequenceClassification`.

    Applies the model to one or more texts (or text pairs) and post-processes
    the logits into `{"label": ..., "score": ...}` dicts.
    """

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Split user kwargs into preprocess / forward / postprocess params.
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        # `top_k=""` is a sentinel meaning "not passed"; an int or None means the new API.
        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated,  if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        """Classify the given text(s); returns a list of dicts (or list of lists of dicts)."""
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # Default the activation from the model config when not explicitly requested.
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
59
0
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE :Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right SCREAMING_SNAKE_CASE :str = 25_0004 SCREAMING_SNAKE_CASE :Dict = 25_0020 @require_sentencepiece @require_tokenizers class UpperCAmelCase ( A_ , unittest.TestCase ): '''simple docstring''' snake_case_ = MBartaaTokenizer snake_case_ = MBartaaTokenizerFast snake_case_ = True snake_case_ = True def UpperCamelCase_ ( self : int ): super().setUp() # We have a SentencePiece fixture for testing __A = MBartaaTokenizer(snake_case__ ,src_lang="en_XX" ,tgt_lang="ro_RO" ,keep_accents=snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self : List[str] ): __A = "<s>" __A = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) ,snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) ,snake_case__ ) def UpperCamelCase_ ( self : str ): __A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"<s>" ) self.assertEqual(vocab_keys[1] ,"<pad>" ) self.assertEqual(vocab_keys[-1] ,"<mask>" ) self.assertEqual(len(snake_case__ ) ,10_54 ) def UpperCamelCase_ ( self : int ): self.assertEqual(self.get_tokenizer().vocab_size ,10_54 ) def UpperCamelCase_ ( self : Any ): __A = MBartaaTokenizer(snake_case__ ,src_lang="en_XX" ,tgt_lang="ro_RO" ,keep_accents=snake_case__ ) __A = tokenizer.tokenize("This is a test" ) self.assertListEqual(snake_case__ ,["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( 
tokenizer.convert_tokens_to_ids(snake_case__ ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,) __A = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( snake_case__ ,[SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] ,) __A = tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ ,[ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] ,) __A = tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ ,[SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] ,) @slow def UpperCamelCase_ ( self : Union[str, Any] ): __A = {"input_ids": [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 
3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case__ ,model_name="facebook/mbart-large-50" ,revision="d3913889c59cd5c9e456b269c376325eabad57e2" ,) def UpperCamelCase_ ( self : List[str] ): if not 
self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __A = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __A = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ ) __A = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ ) __A = tempfile.mkdtemp() __A = tokenizer_r.save_pretrained(snake_case__ ) __A = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) __A = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(snake_case__ ,snake_case__ ) # Checks everything loads correctly in the same way __A = tokenizer_r.from_pretrained(snake_case__ ) __A = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ ,snake_case__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=True __A = tempfile.mkdtemp() __A = tokenizer_r.save_pretrained(snake_case__ ,legacy_format=snake_case__ ) __A = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files self.assertSequenceEqual(snake_case__ ,snake_case__ ) # Checks everything loads correctly in the same way __A = tokenizer_r.from_pretrained(snake_case__ ) __A = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ 
,snake_case__ ) ) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=False __A = tempfile.mkdtemp() __A = tokenizer_r.save_pretrained(snake_case__ ,legacy_format=snake_case__ ) __A = tokenizer_p.save_pretrained(snake_case__ ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __A = tokenizer_r.from_pretrained(snake_case__ ) __A = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ ,snake_case__ ) ) shutil.rmtree(snake_case__ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' snake_case_ = "facebook/mbart-large-50-one-to-many-mmt" snake_case_ = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] snake_case_ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] snake_case_ = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2] @classmethod def UpperCamelCase_ ( cls : Optional[int] ): __A = MBartaaTokenizer.from_pretrained( cls.checkpoint_name ,src_lang="en_XX" ,tgt_lang="ro_RO" ) __A = 1 return cls def UpperCamelCase_ ( self : int ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] ,25_00_01 ) 
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] ,25_00_04 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] ,25_00_20 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] ,25_00_38 ) def UpperCamelCase_ ( self : int ): __A = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens ,snake_case__ ) def UpperCamelCase_ ( self : List[str] ): self.assertIn(snake_case__ ,self.tokenizer.all_special_ids ) __A = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2] __A = self.tokenizer.decode(snake_case__ ,skip_special_tokens=snake_case__ ) __A = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=snake_case__ ) self.assertEqual(snake_case__ ,snake_case__ ) self.assertNotIn(self.tokenizer.eos_token ,snake_case__ ) def UpperCamelCase_ ( self : Optional[Any] ): __A = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] ,snake_case__ ) __A = 10 __A = self.tokenizer(snake_case__ ,max_length=snake_case__ ,truncation=snake_case__ ).input_ids[0] self.assertEqual(ids[0] ,snake_case__ ) self.assertEqual(ids[-1] ,2 ) self.assertEqual(len(snake_case__ ) ,snake_case__ ) def UpperCamelCase_ ( self : str ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) ,[25_00_53, 25_00_01] ) def UpperCamelCase_ ( self : Optional[Any] ): __A = tempfile.mkdtemp() __A = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(snake_case__ ) __A = MBartaaTokenizer.from_pretrained(snake_case__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,snake_case__ ) @require_torch def UpperCamelCase_ ( self : Optional[int] ): __A = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=snake_case__ ,return_tensors="pt" ) __A = shift_tokens_right(batch["labels"] ,self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert 
batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def UpperCamelCase_ ( self : Union[str, Any] ): __A = self.tokenizer( self.src_text ,text_target=self.tgt_text ,padding=snake_case__ ,truncation=snake_case__ ,max_length=len(self.expected_src_tokens ) ,return_tensors="pt" ,) __A = shift_tokens_right(batch["labels"] ,self.tokenizer.pad_token_id ) self.assertIsInstance(snake_case__ ,snake_case__ ) self.assertEqual((2, 14) ,batch.input_ids.shape ) self.assertEqual((2, 14) ,batch.attention_mask.shape ) __A = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens ,snake_case__ ) self.assertEqual(2 ,batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] ) def UpperCamelCase_ ( self : List[Any] ): __A = self.tokenizer(self.src_text ,padding=snake_case__ ,truncation=snake_case__ ,max_length=3 ,return_tensors="pt" ) __A = self.tokenizer( text_target=self.tgt_text ,padding=snake_case__ ,truncation=snake_case__ ,max_length=10 ,return_tensors="pt" ) __A = targets["input_ids"] __A = shift_tokens_right(snake_case__ ,self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.decoder_input_ids.shape[1] ,10 ) @require_torch def UpperCamelCase_ ( self : Optional[Any] ): __A = self.tokenizer._build_translation_inputs( "A test" ,return_tensors="pt" ,src_lang="en_XX" ,tgt_lang="ar_AR" ) self.assertEqual( nested_simplify(snake_case__ ) ,{ # en_XX, A, test, EOS "input_ids": [[25_00_04, 62, 30_34, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 25_00_01, } ,)
15
from __future__ import annotations

# A 9x9 sudoku board: 0 denotes an empty cell.
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` may be placed at (row, column).

    Checks the row, the column, and the 3x3 box containing the cell.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the (row, column) of the first empty cell, or None if full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place by backtracking.

    Returns the (mutated) grid when a solution is found, or None when the
    grid is unsolvable. Cells tried unsuccessfully are reset to 0.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # undo and try the next digit

    return None


def print_solution(grid: Matrix) -> None:
    """Pretty-print a grid, one row per line."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
59
0
"""simple docstring""" from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : Dict , _UpperCamelCase : Any=1E-12 ) -> Dict: '''simple docstring''' __UpperCAmelCase : str = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__lowerCamelCase , axis=1 ) , a_min=__lowerCamelCase ) ).T __UpperCAmelCase : str = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__lowerCamelCase , axis=1 ) , a_min=__lowerCamelCase ) ).T return jnp.matmul(__lowerCamelCase , norm_emb_a.T ) class lowerCamelCase__ ( nn.Module ): """simple docstring""" __a = 42 __a = jnp.floataa def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxCLIPVisionModule(self.config.vision_config ) __UpperCAmelCase : List[Any] = nn.Dense(self.config.projection_dim , use_bias=snake_case__ , dtype=self.dtype ) __UpperCAmelCase : Tuple = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim) ) __UpperCAmelCase : Optional[int] = self.param( """special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim) ) __UpperCAmelCase : Any = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,) ) __UpperCAmelCase : Any = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,) ) def __call__( self : Tuple , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : str = self.vision_model(snake_case__ )[1] __UpperCAmelCase : int = self.visual_projection(snake_case__ ) __UpperCAmelCase : str = jax_cosine_distance(snake_case__ , self.special_care_embeds ) __UpperCAmelCase : Dict = jax_cosine_distance(snake_case__ , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of 
increasing the possibility of filtering benign image inputs __UpperCAmelCase : List[str] = 0.0 __UpperCAmelCase : Optional[int] = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment __UpperCAmelCase : int = jnp.round(snake_case__ , 3 ) __UpperCAmelCase : Optional[Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=snake_case__ ) # Use a lower threshold if an image has any special care concept __UpperCAmelCase : str = is_special_care * 0.01 __UpperCAmelCase : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment __UpperCAmelCase : Optional[Any] = jnp.round(snake_case__ , 3 ) __UpperCAmelCase : int = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class lowerCamelCase__ ( A_ ): """simple docstring""" __a = CLIPConfig __a = "clip_input" __a = FlaxStableDiffusionSafetyCheckerModule def __init__( self : str , UpperCamelCase : CLIPConfig , UpperCamelCase : Optional[Tuple] = None , UpperCamelCase : int = 0 , UpperCamelCase : jnp.dtype = jnp.floataa , UpperCamelCase : bool = True , **UpperCamelCase : Dict , ): '''simple docstring''' if input_shape is None: __UpperCAmelCase : int = (1, 224, 224, 3) __UpperCAmelCase : Union[str, Any] = self.module_class(config=snake_case__ , dtype=snake_case__ , **snake_case__ ) super().__init__(snake_case__ , snake_case__ , input_shape=snake_case__ , seed=snake_case__ , dtype=snake_case__ , _do_init=_do_init ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : jax.random.KeyArray , UpperCamelCase : Tuple , UpperCamelCase : FrozenDict = None ): '''simple docstring''' __UpperCAmelCase : str = jax.random.normal(snake_case__ , snake_case__ ) __UpperCAmelCase : int = jax.random.split(snake_case__ ) __UpperCAmelCase : Optional[int] = {"params": params_rng, "dropout": dropout_rng} __UpperCAmelCase : Optional[int] = self.module.init(snake_case__ , snake_case__ )["params"] return random_params def __call__( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : dict = None 
, ): '''simple docstring''' __UpperCAmelCase : int = jnp.transpose(snake_case__ , (0, 2, 3, 1) ) return self.module.apply( {"""params""": params or self.params} , jnp.array(snake_case__ , dtype=jnp.floataa ) , rngs={} , )
115
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="""%(message)s""") def UpperCamelCase ( __lowerCamelCase : np.ndarray ): return input_array.reshape((input_array.size, 1) ) def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : int ): snake_case : Any = np.nan for i in range(__lowerCamelCase ): snake_case : List[str] = features[:, labels == i] snake_case : Dict = data.mean(1 ) # Centralize the data of class i snake_case : Optional[Any] = data - column_reshape(__lowerCamelCase ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(__lowerCamelCase , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) snake_case : Optional[Any] = np.dot(__lowerCamelCase , centered_data.T ) return covariance_sum / features.shape[1] def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : int ): snake_case : Optional[Any] = features.mean(1 ) snake_case : Tuple = np.nan for i in range(__lowerCamelCase ): snake_case : Tuple = features[:, labels == i] snake_case : Tuple = data.shape[1] snake_case : List[str] = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase ) , (column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase )).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) snake_case : Optional[int] = device_data * np.dot( column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase ) , (column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase )).T , ) return covariance_sum / features.shape[1] def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : int ): # Check if the features have been loaded if features.any(): snake_case : Tuple = features.mean(1 ) # Center the dataset snake_case : List[str] = features - np.reshape(__lowerCamelCase , (data_mean.size, 1) ) snake_case : Optional[Any] = np.dot(__lowerCamelCase , centered_data.T ) / features.shape[1] snake_case , snake_case : Dict = np.linalg.eigh(__lowerCamelCase ) # Take all the columns in the reverse order (-1), and then takes only the first snake_case : Optional[Any] = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space snake_case : Union[str, Any] = np.dot(filtered_eigenvectors.T , __lowerCamelCase ) logging.info("Principal Component Analysis computed" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=__lowerCamelCase ) logging.error("Dataset empty" ) raise AssertionError def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : int , __lowerCamelCase : int ): assert classes > dimensions # Check if features have been already loaded if features.any: snake_case , snake_case : str = eigh( covariance_between_classes(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , covariance_within_classes(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , ) snake_case : str = eigenvectors[:, ::-1][:, :dimensions] snake_case , snake_case , snake_case : int = np.linalg.svd(__lowerCamelCase ) snake_case : List[Any] = svd_matrix[:, 0:dimensions] snake_case : Optional[Any] = np.dot(filtered_svd_matrix.T , __lowerCamelCase ) logging.info("Linear Discriminant Analysis computed" ) return projected_data else: 
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=__lowerCamelCase ) logging.error("Dataset empty" ) raise AssertionError def UpperCamelCase ( ): # Create dummy dataset with 2 classes and 3 features snake_case : str = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) snake_case : Union[str, Any] = np.array([0, 0, 0, 1, 1] ) snake_case : List[Any] = 2 snake_case : Any = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(__lowerCamelCase ) as error_info: snake_case : str = linear_discriminant_analysis( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if isinstance(__lowerCamelCase , np.ndarray ): raise AssertionError( "Did not raise AssertionError for dimensions > classes" ) assert error_info.type is AssertionError def UpperCamelCase ( ): snake_case : List[str] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) snake_case : List[str] = 2 snake_case : int = np.array([[6.9282_0323, 8.6602_5404, 10.3923_0485], [3.0, 3.0, 3.0]] ) with pytest.raises(__lowerCamelCase ) as error_info: snake_case : Union[str, Any] = principal_component_analysis(__lowerCamelCase , __lowerCamelCase ) if not np.allclose(__lowerCamelCase , __lowerCamelCase ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
59
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase : List[Any] ={ "configuration_jukebox": [ "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "JukeboxConfig", "JukeboxPriorConfig", "JukeboxVQVAEConfig", ], "tokenization_jukebox": ["JukeboxTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : List[Any] =[ "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST", "JukeboxModel", "JukeboxPreTrainedModel", "JukeboxVQVAE", "JukeboxPrior", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys __lowerCAmelCase : Union[str, Any] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
237
import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def UpperCamelCase ( __lowerCamelCase : Optional[int] ): monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def UpperCamelCase ( __lowerCamelCase : str ): class UpperCAmelCase : def __init__(self : Optional[int] , snake_case__ : str ) -> Any: '''simple docstring''' snake_case : List[str] = metric_id class UpperCAmelCase : A__ : List[str] = [MetricMock(A_ ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]] def _SCREAMING_SNAKE_CASE (self : int ) -> List[str]: '''simple docstring''' return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def UpperCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Any ): if "tmp_path" in args: snake_case : str = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(__lowerCamelCase , match="https://huggingface.co/docs/evaluate" ): func(*__lowerCamelCase )
59
0
import enum import shutil import sys __lowercase , __lowercase = shutil.get_terminal_size() __lowercase = {'''UP''': '''A''', '''DOWN''': '''B''', '''RIGHT''': '''C''', '''LEFT''': '''D'''} class lowerCamelCase_ ( enum.Enum ): '''simple docstring''' a__ : Dict = 0 a__ : str = 1 def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="" ): '''simple docstring''' sys.stdout.write(str(__lowerCamelCase ) + end ) sys.stdout.flush() def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="" ): '''simple docstring''' forceWrite(f"""\u001b[{color}m{content}\u001b[0m""" , __lowerCamelCase ) def lowerCamelCase ( ): '''simple docstring''' forceWrite('''\r''' ) def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' forceWrite(f"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" ) def lowerCamelCase ( ): '''simple docstring''' forceWrite(''' ''' * TERMINAL_WIDTH ) reset_cursor() def lowerCamelCase ( ): '''simple docstring''' reset_cursor() forceWrite('''-''' * TERMINAL_WIDTH )
43
"""Self-training loop for text classification: alternates fine-tuning and
pseudo-labeling of unlabeled data until convergence or early stopping."""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional

import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm

import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy


logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"


@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which model to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training / inference."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Training arguments controlling the self-training schedule."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )


def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Combine model predictions with the unlabeled inputs, optionally filter
    them, and write the resulting pseudo-labeled training file for the next
    self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        # Keep only the top fraction (= validation score) most confident rows.
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)


def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Run the full self-training loop.

    Each iteration fine-tunes on (pseudo-)labeled data (stage 1), optionally
    fine-tunes again on the original labeled data (stage 2), then
    pseudo-labels the unlabeled set for the next iteration. Early stopping is
    driven by the configured evaluation metric.
    """
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process
    # per machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)

    # Flatten every argument group into a single namespace, then let explicit
    # kwargs override the defaults.
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)
    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training
        # for iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
59
0
"""simple docstring""" import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( '''split_dict''' , [ SplitDict(), SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ), SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ), SplitDict({'''train''': SplitInfo()} ), ] , ) def snake_case__ ( __lowerCamelCase : SplitDict ): """simple docstring""" lowerCamelCase__ : List[Any] =split_dict._to_yaml_list() assert len(__lowerCamelCase ) == len(__lowerCamelCase ) lowerCamelCase__ : List[Any] =SplitDict._from_yaml_list(__lowerCamelCase ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump lowerCamelCase__ : Tuple =None # the split name of split_dict takes over the name of the split info object lowerCamelCase__ : str =split_name assert split_dict == reloaded @pytest.mark.parametrize( '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=__lowerCamelCase ), SplitInfo(dataset_name='''my_dataset''' )] ) def snake_case__ ( __lowerCamelCase : Dict ): """simple docstring""" # For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name" # field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files lowerCamelCase__ : Tuple =asdict(SplitDict({'''train''': split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
238
"""Lazy import scaffold for the XGLM model family.

Only `configuration_xglm` is always importable; the tokenizer and modeling
submodules are registered conditionally, depending on which optional backends
(sentencepiece, tokenizers, torch, flax, tf) are installed.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it defines; extended below per backend.
_import_structure = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_xglm"""] = ["""XGLMTokenizer"""]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_xglm_fast"""] = ["""XGLMTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_xglm"""] = [
        """XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """XGLMForCausalLM""",
        """XGLMModel""",
        """XGLMPreTrainedModel""",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_flax_xglm"""] = [
        """FlaxXGLMForCausalLM""",
        """FlaxXGLMModel""",
        """FlaxXGLMPreTrainedModel""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_tf_xglm"""] = [
        """TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFXGLMForCausalLM""",
        """TFXGLMModel""",
        """TFXGLMPreTrainedModel""",
    ]


if TYPE_CHECKING:
    # Direct imports for static type checkers; mirrors `_import_structure` above.
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
59
0
"""`datasets` metric wrapper around scikit-learn's Matthews correlation coefficient."""
from sklearn.metrics import matthews_corrcoef

import datasets


_DESCRIPTION = '\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n'

_KWARGS_DESCRIPTION = '\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results[\'matthews_correlation\'], 2))\n -0.25\n'

_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
    """Matthews correlation coefficient metric (delegates to sklearn)."""

    def _info( self ):
        """Describe the metric: expected features, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('int32' ),
                    'references': datasets.Value('int32' ),
                }
            ) ,
            reference_urls=[
                'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
            ] ,
        )

    def _compute( self , predictions , references , sample_weight=None ):
        """Return {"matthews_correlation": float} for the given labels.

        Args:
            predictions: predicted labels, as returned by a model.
            references: ground-truth labels.
            sample_weight: optional per-sample weights forwarded to sklearn.
        """
        return {
            "matthews_correlation": float(matthews_corrcoef(references , predictions , sample_weight=sample_weight ) ),
        }
225
"""Configuration class for Megatron-BERT models."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__lowerCamelCase = logging.get_logger(__name__)

# NOTE(review): this rebinds the same global, clobbering the logger above —
# upstream these are two distinct names (logger / archive map). Kept as found.
__lowerCamelCase = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class UpperCAmelCase ( PretrainedConfig ):
    """Stores the configuration of a Megatron-BERT model.

    All arguments default to the values of the megatron-bert architecture;
    unknown keyword arguments are forwarded to `PretrainedConfig`.
    """

    # Identifier used by the auto classes to map config -> model.
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=2_90_56,
        hidden_size=10_24,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=40_96,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
59
0
"""Lucas-Lehmer primality test for Mersenne numbers 2**p - 1."""


def a_ ( lowerCamelCase ):
    """Return True iff the Mersenne number 2**p - 1 is prime, for p = lowerCamelCase.

    The test is only meaningful when the exponent p is itself prime.

    Args:
        lowerCamelCase: the exponent p (int, must be >= 2).

    Raises:
        ValueError: if p < 2.
    """
    p = lowerCamelCase
    if p < 2:
        raise ValueError('p should not be less than 2!' )
    elif p == 2:
        # 2**2 - 1 = 3 is prime; the s-recurrence below requires p > 2.
        return True

    s = 4
    m = (1 << p) - 1  # the Mersenne number 2**p - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    # 2**p - 1 is prime iff the (p-2)-th term of the sequence is 0 mod m.
    return s == 0


# Public alias used by the demo below (and the name this function is known by upstream).
lucas_lehmer_test = a_


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
98
"""Slow integration tests comparing `FlaxUNet2DConditionModel` forward passes
against recorded numpy fixtures from the hub.

NOTE(review): identifiers in this file look machine-mangled and several defects
are visible but deliberately left untouched by this documentation-only pass:
  * every method is named `_SCREAMING_SNAKE_CASE`, so later defs shadow earlier
    ones and `self.get_file_format` / `self.get_unet_model` / `self.get_latents`
    / `self.get_encoder_hidden_states` are never defined under those names;
  * several signatures repeat the parameter name `snake_case__` (a SyntaxError);
  * bodies reference names (`seed`, `shape`, `fpaa`, `image`, `model`, `params`,
    `hidden_states`, `sample`, `latents`, `output_slice`, `expected_slice`) that
    the mangled signatures no longer bind.
"""
import gc
import unittest

from parameterized import parameterized

from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow


# jax/flax are optional backends; only import them when available.
if is_flax_available():
    import jax
    import jax.numpy as jnp


@slow
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
    def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] ) -> List[str]:
        '''Build the .npy fixture filename for a given seed and shape — presumably
        the two positional args; TODO confirm once names are un-mangled.'''
        return f"""gaussian_noise_s={seed}_shape={'_'.join([str(snake_case__ ) for s in shape] )}.npy"""

    def _SCREAMING_SNAKE_CASE (self : Tuple ) -> int:
        '''Release references and force a GC sweep between tests.'''
        super().tearDown()
        gc.collect()

    def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[Any]=0 , snake_case__ : Any=(4, 4, 64, 64) , snake_case__ : List[Any]=False ) -> int:
        '''Load a recorded gaussian-noise latent fixture as a jnp array
        (bfloat16 when the fp16 flag is set, else float32).'''
        snake_case : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
        snake_case : Optional[int] = jnp.array(load_hf_numpy(self.get_file_format(snake_case__ , snake_case__ ) ) , dtype=snake_case__ )
        return image

    def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Tuple=False , snake_case__ : List[Any]="CompVis/stable-diffusion-v1-4" ) -> List[Any]:
        '''Load the Flax UNet (and its params) from the hub; uses the "bf16"
        revision when the fp16 flag is set.'''
        snake_case : List[str] = jnp.bfloataa if fpaa else jnp.floataa
        snake_case : str = "bf16" if fpaa else None
        snake_case , snake_case = FlaxUNetaDConditionModel.from_pretrained(
            snake_case__ , subfolder="unet" , dtype=snake_case__ , revision=snake_case__
        )
        return model, params

    def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : Union[str, Any]=0 , snake_case__ : Union[str, Any]=(4, 77, 7_68) , snake_case__ : Dict=False ) -> List[str]:
        '''Load a recorded text-encoder hidden-states fixture as a jnp array.'''
        snake_case : Any = jnp.bfloataa if fpaa else jnp.floataa
        snake_case : Any = jnp.array(load_hf_numpy(self.get_file_format(snake_case__ , snake_case__ ) ) , dtype=snake_case__ )
        return hidden_states

    # Each row: [seed, timestep, expected output slice] — recorded reference values.
    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 10_00, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ] )
    def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Dict ) -> List[str]:
        '''Run a UNet forward pass (SD v1-4) and compare a corner slice of the
        output against the recorded expected values.'''
        snake_case , snake_case = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=snake_case__ )
        snake_case : Union[str, Any] = self.get_latents(snake_case__ , fpaa=snake_case__ )
        snake_case : List[str] = self.get_encoder_hidden_states(snake_case__ , fpaa=snake_case__ )
        snake_case : Dict = model.apply(
            {"params": params} ,
            snake_case__ ,
            jnp.array(snake_case__ , dtype=jnp.intaa ) ,
            encoder_hidden_states=snake_case__ ,
        ).sample
        assert sample.shape == latents.shape
        snake_case : Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
        snake_case : Optional[int] = jnp.array(snake_case__ , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(snake_case__ , snake_case__ , atol=1e-2 )

    # Same scheme for Stable Diffusion 2 (larger latent/hidden-state shapes below).
    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 10_00, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ] )
    def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Tuple ) -> str:
        '''Run a UNet forward pass (SD 2) and compare a corner slice of the
        output against the recorded expected values.'''
        snake_case , snake_case = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=snake_case__ )
        snake_case : List[str] = self.get_latents(snake_case__ , shape=(4, 4, 96, 96) , fpaa=snake_case__ )
        snake_case : Union[str, Any] = self.get_encoder_hidden_states(snake_case__ , shape=(4, 77, 10_24) , fpaa=snake_case__ )
        snake_case : Optional[int] = model.apply(
            {"params": params} ,
            snake_case__ ,
            jnp.array(snake_case__ , dtype=jnp.intaa ) ,
            encoder_hidden_states=snake_case__ ,
        ).sample
        assert sample.shape == latents.shape
        snake_case : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
        snake_case : Dict = jnp.array(snake_case__ , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(snake_case__ , snake_case__ , atol=1e-2 )
59
0
"""Minimum number of moves to give every node of a binary tree exactly one coin.

Each move transfers one coin along one edge (LeetCode 979, "Distribute Coins
in Binary Tree").
"""
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class lowerCAmelCase__:
    """A binary-tree node holding `data` coins."""

    data: int
    left: lowerCAmelCase__ | None = None
    right: lowerCAmelCase__ | None = None


# (moves performed inside a subtree, excess coins that flow out of it).
CoinsDistribResult = namedtuple("""CoinsDistribResult""", """moves excess""")
# Preserve the module-level binding this file previously exposed.
UpperCAmelCase_ = CoinsDistribResult


def _A (__a ) -> int:
    """Return the minimum number of single-coin moves to leave one coin per node.

    Args:
        __a: root of the tree (or None for an empty tree, which costs 0 moves).

    Raises:
        ValueError: if the total number of coins differs from the number of
            nodes (a valid distribution would then be impossible).
    """
    root = __a
    if root is None:
        return 0

    # Validation
    def count_nodes(node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(root ) != count_coins(root ):
        raise ValueError('''The nodes number should be same as the number of coins''' )

    # Main calculation
    def get_distrib(node ) -> CoinsDistribResult:
        if node is None:
            # Empty subtree: no moves, and (by convention of the formula below)
            # an excess of 1 so no coins cross the missing edge.
            return CoinsDistribResult(0 , 1 )

        left_distrib_moves, left_distrib_excess = get_distrib(node.left )
        right_distrib_moves, right_distrib_excess = get_distrib(node.right )

        # Coins that must cross the edge to each child (sign = direction).
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        coins_distrib = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(coins_distrib , excess )

    return get_distrib(root )[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
91
"""Evaluate a speech-recognition model (WER/CER) on a 🤗 Datasets split.

NOTE(review): identifiers look machine-mangled and several defects are visible
but deliberately left untouched by this documentation-only pass:
  * the first and inner function signatures repeat the parameter name
    `__lowerCamelCase` (a SyntaxError);
  * all three top-level functions share the name `UpperCamelCase`, so later
    defs shadow earlier ones and `main(args)` at the bottom resolves to the
    last one;
  * bodies reference names (`result`, `text`, `args`, `wer`, `cer`, `asr`,
    `batch`, `i`, `prediction`, ...) that the mangled signatures/assignments
    no longer bind.
"""
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline


def UpperCamelCase ( __lowerCamelCase : Dataset , __lowerCamelCase : Dict[str, str] ):
    '''Compute WER/CER over a result dataset and write them (plus optional
    per-example prediction/target logs) to text files.'''
    snake_case : int = args.log_outputs
    snake_case : Dict = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
    # load metric
    snake_case : List[str] = load_metric("wer" )
    snake_case : Tuple = load_metric("cer" )
    # compute metrics
    snake_case : List[Any] = wer.compute(references=result["target"] , predictions=result["prediction"] )
    snake_case : int = cer.compute(references=result["target"] , predictions=result["prediction"] )
    # print & log results
    snake_case : int = f"""WER: {wer_result}\nCER: {cer_result}"""
    print(__lowerCamelCase )
    with open(f"""{dataset_id}_eval_results.txt""" , "w" ) as f:
        f.write(__lowerCamelCase )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        snake_case : int = f"""log_{dataset_id}_predictions.txt"""
        snake_case : List[Any] = f"""log_{dataset_id}_targets.txt"""
        with open(__lowerCamelCase , "w" ) as p, open(__lowerCamelCase , "w" ) as t:
            # mapping function to write output
            def write_to_file(__lowerCamelCase : str , __lowerCamelCase : Optional[int] ):
                p.write(f"""{i}""" + "\n" )
                p.write(batch["prediction"] + "\n" )
                t.write(f"""{i}""" + "\n" )
                t.write(batch["target"] + "\n" )

            result.map(__lowerCamelCase , with_indices=__lowerCamelCase )


def UpperCamelCase ( __lowerCamelCase : str ):
    '''Normalize a transcription: strip punctuation ignored during training and
    collapse newline/whitespace runs.'''
    snake_case : List[Any] = "[,?.!\-\;\:\"“%‘”�—’…–]"  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    snake_case : List[Any] = re.sub(__lowerCamelCase , "" , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    snake_case : Optional[Any] = ["\n\n", "\n", " ", " "]
    for t in token_sequences_to_ignore:
        snake_case : Dict = " ".join(text.split(__lowerCamelCase ) )

    return text


def UpperCamelCase ( __lowerCamelCase : int ):
    '''Entry point: load the dataset, resample audio to the model's rate, run
    the ASR pipeline over every example and log WER/CER results.'''
    # load dataset
    snake_case : str = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__lowerCamelCase )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    snake_case : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id )
    snake_case : Union[str, Any] = feature_extractor.sampling_rate

    # resample audio
    snake_case : Union[str, Any] = dataset.cast_column("audio" , Audio(sampling_rate=__lowerCamelCase ) )

    # load eval pipeline
    if args.device is None:
        snake_case : List[str] = 0 if torch.cuda.is_available() else -1
    snake_case : str = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )

    # map function to decode audio
    def map_to_pred(__lowerCamelCase : int ):
        snake_case : Dict = asr(
            batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        snake_case : str = prediction["text"]
        snake_case : Tuple = normalize_text(batch["sentence"] )
        return batch

    # run inference on all examples
    snake_case : Dict = dataset.map(__lowerCamelCase , remove_columns=dataset.column_names )

    # compute and log_results
    # do not change function below
    log_results(__lowerCamelCase , __lowerCamelCase )


if __name__ == "__main__":
    __lowerCamelCase = argparse.ArgumentParser()

    parser.add_argument(
        """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
    )
    parser.add_argument(
        """--dataset""",
        type=str,
        required=True,
        help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
    )
    parser.add_argument(
        """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
    )
    parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
    parser.add_argument(
        """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
    )
    parser.add_argument(
        """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
    )
    parser.add_argument(
        """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
    )
    parser.add_argument(
        """--device""",
        type=int,
        default=None,
        help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
    )
    __lowerCamelCase = parser.parse_args()

    main(args)
59
0
"""Extract selected warning categories from GitHub Actions artifact zips.

NOTE(review): identifiers look machine-mangled and several defects are visible
but deliberately left untouched by this documentation-only pass:
  * the two top-level functions and the `list_str` helper all share the name
    `__lowerCAmelCase`, so later defs shadow earlier ones and the calls to
    `extract_warnings_from_single_artifact` / `extract_warnings` / `list_str`
    below resolve to nothing;
  * both function signatures repeat the parameter name `_UpperCamelCase`
    (a SyntaxError);
  * bodies reference names (`fp`, `line`, `targets`, `warning`, `buffer`,
    `selected_warnings`, `paths`, `from_gh`, `values`, `parser`, `args`, ...)
    that the mangled signatures/assignments no longer bind.
"""
import argparse
import json
import os
import time
import zipfile

from get_ci_error_statistics import download_artifact, get_artifacts_links

from transformers import logging


lowerCamelCase__ = logging.get_logger(__name__)


def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
    '''Parse one artifact (zip or extracted dir) and collect warnings whose
    category matches one of `targets`.'''
    __lowerCAmelCase : int = set()
    __lowerCAmelCase : Tuple = []

    def parse_line(_UpperCamelCase ):
        '''Accumulate indented continuation lines into one warning, then keep it
        if its category is targeted.'''
        for line in fp:
            if isinstance(__lowerCamelCase , __lowerCamelCase ):
                __lowerCAmelCase : Tuple = line.decode('UTF-8' )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(' ' ):
                # process a single warning and move it to `selected_warnings`.
                if len(__lowerCamelCase ) > 0:
                    __lowerCAmelCase : List[str] = "\n".join(__lowerCamelCase )
                    # Only keep the warnings specified in `targets`
                    if any(F": {x}: " in warning for x in targets ):
                        selected_warnings.add(__lowerCamelCase )
                    buffer.clear()
                continue
            else:
                __lowerCAmelCase : Tuple = line.strip()
                buffer.append(__lowerCamelCase )

    if from_gh:
        # Artifacts were pre-extracted by actions/download-artifact; walk the dir.
        for filename in os.listdir(__lowerCamelCase ):
            __lowerCAmelCase : List[Any] = os.path.join(__lowerCamelCase , __lowerCamelCase )
            if not os.path.isdir(__lowerCamelCase ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(__lowerCamelCase ) as fp:
                    parse_line(__lowerCamelCase )
    else:
        try:
            with zipfile.ZipFile(__lowerCamelCase ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(__lowerCamelCase ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(__lowerCamelCase ) as fp:
                            parse_line(__lowerCamelCase )
        except Exception:
            # best-effort: a corrupt zip should not abort the whole extraction
            logger.warning(
                F"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
    '''Union the targeted warnings collected from every artifact under a dir.'''
    __lowerCAmelCase : Union[str, Any] = set()
    __lowerCAmelCase : List[Any] = [os.path.join(__lowerCamelCase , __lowerCamelCase ) for p in os.listdir(__lowerCamelCase ) if (p.endswith('.zip' ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(__lowerCamelCase , __lowerCamelCase ) )

    return selected_warnings


if __name__ == "__main__":

    def __lowerCAmelCase (_UpperCamelCase ):
        # argparse `type=` helper: split a comma-separated list
        return values.split(',' )

    lowerCamelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
    parser.add_argument(
        """--output_dir""",
        type=str,
        required=True,
        help="""Where to store the downloaded artifacts and other result files.""",
    )
    parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
    # optional parameters
    parser.add_argument(
        """--targets""",
        default="""DeprecationWarning,UserWarning,FutureWarning""",
        type=list_str,
        help="""Comma-separated list of target warning(s) which we want to extract.""",
    )
    parser.add_argument(
        """--from_gh""",
        action="""store_true""",
        help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
    )

    lowerCamelCase__ = parser.parse_args()
    lowerCamelCase__ = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        lowerCamelCase__ = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("""=""" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    lowerCamelCase__ = extract_warnings(args.output_dir, args.targets)
    lowerCamelCase__ = sorted(selected_warnings)

    with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
86
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class UpperCAmelCase ( A_ ): A__ : jnp.ndarray @flax_register_to_config class UpperCAmelCase ( nn.Module ,A_ ,A_ ): A__ : int = 32 A__ : int = 4 A__ : int = 4 A__ : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) A__ : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") A__ : Union[bool, Tuple[bool]] = False A__ : Tuple[int] = (3_20, 6_40, 12_80, 12_80) A__ : int = 2 A__ : Union[int, Tuple[int]] = 8 A__ : Optional[Union[int, Tuple[int]]] = None A__ : int = 12_80 A__ : float = 0.0 A__ : bool = False A__ : jnp.dtype = jnp.floataa A__ : bool = True A__ : int = 0 A__ : bool = False def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : jax.random.KeyArray ) -> FrozenDict: '''simple docstring''' snake_case : Dict = (1, self.in_channels, self.sample_size, self.sample_size) snake_case : Any = jnp.zeros(snake_case__ , dtype=jnp.floataa ) snake_case : List[str] = jnp.ones((1,) , dtype=jnp.intaa ) snake_case : str = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) snake_case , snake_case : Optional[int] = jax.random.split(snake_case__ ) snake_case : Union[str, Any] = {"params": params_rng, "dropout": dropout_rng} return self.init(snake_case__ , snake_case__ , snake_case__ , snake_case__ )["params"] def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple: '''simple docstring''' snake_case : str = self.block_out_channels 
snake_case : Optional[Any] = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. snake_case : Tuple = self.num_attention_heads or self.attention_head_dim # input snake_case : Tuple = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time snake_case : Union[str, Any] = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) snake_case : Dict = FlaxTimestepEmbedding(snake_case__ , dtype=self.dtype ) snake_case : List[str] = self.only_cross_attention if isinstance(snake_case__ , snake_case__ ): snake_case : List[Any] = (only_cross_attention,) * len(self.down_block_types ) if isinstance(snake_case__ , snake_case__ ): snake_case : List[Any] = (num_attention_heads,) * len(self.down_block_types ) # down snake_case : List[Any] = [] snake_case : Optional[int] = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): snake_case : List[Any] = output_channel snake_case : Dict = block_out_channels[i] snake_case : 
Optional[Any] = i == len(snake_case__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": snake_case : List[Any] = FlaxCrossAttnDownBlockaD( in_channels=snake_case__ , out_channels=snake_case__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: snake_case : Union[str, Any] = FlaxDownBlockaD( in_channels=snake_case__ , out_channels=snake_case__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(snake_case__ ) snake_case : Dict = down_blocks # mid snake_case : Optional[int] = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up snake_case : Optional[Any] = [] snake_case : Optional[int] = list(reversed(snake_case__ ) ) snake_case : Dict = list(reversed(snake_case__ ) ) snake_case : Tuple = list(reversed(snake_case__ ) ) snake_case : Optional[Any] = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): snake_case : Optional[int] = output_channel snake_case : List[Any] = reversed_block_out_channels[i] snake_case : Union[str, Any] = reversed_block_out_channels[min(i + 1 , len(snake_case__ ) - 1 )] snake_case : int = i == len(snake_case__ ) - 1 if up_block_type == "CrossAttnUpBlock2D": snake_case : Any = FlaxCrossAttnUpBlockaD( in_channels=snake_case__ , out_channels=snake_case__ , prev_output_channel=snake_case__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , 
dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: snake_case : Optional[int] = FlaxUpBlockaD( in_channels=snake_case__ , out_channels=snake_case__ , prev_output_channel=snake_case__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(snake_case__ ) snake_case : Optional[int] = output_channel snake_case : Tuple = up_blocks # out snake_case : Optional[int] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) snake_case : List[str] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__(self : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : bool = True , snake_case__ : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]: '''simple docstring''' if not isinstance(snake_case__ , jnp.ndarray ): snake_case : List[Any] = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(snake_case__ , jnp.ndarray ) and len(timesteps.shape ) == 0: snake_case : Any = timesteps.astype(dtype=jnp.floataa ) snake_case : int = jnp.expand_dims(snake_case__ , 0 ) snake_case : str = self.time_proj(snake_case__ ) snake_case : str = self.time_embedding(snake_case__ ) # 2. pre-process snake_case : int = jnp.transpose(snake_case__ , (0, 2, 3, 1) ) snake_case : List[Any] = self.conv_in(snake_case__ ) # 3. 
down snake_case : Optional[int] = (sample,) for down_block in self.down_blocks: if isinstance(snake_case__ , snake_case__ ): snake_case , snake_case : List[Any] = down_block(snake_case__ , snake_case__ , snake_case__ , deterministic=not train ) else: snake_case , snake_case : str = down_block(snake_case__ , snake_case__ , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: snake_case : Tuple = () for down_block_res_sample, down_block_additional_residual in zip( snake_case__ , snake_case__ ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) snake_case : Optional[int] = new_down_block_res_samples # 4. mid snake_case : Optional[int] = self.mid_block(snake_case__ , snake_case__ , snake_case__ , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: snake_case : int = down_block_res_samples[-(self.layers_per_block + 1) :] snake_case : Optional[Any] = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(snake_case__ , snake_case__ ): snake_case : Optional[Any] = up_block( snake_case__ , temb=snake_case__ , encoder_hidden_states=snake_case__ , res_hidden_states_tuple=snake_case__ , deterministic=not train , ) else: snake_case : Dict = up_block(snake_case__ , temb=snake_case__ , res_hidden_states_tuple=snake_case__ , deterministic=not train ) # 6. post-process snake_case : List[str] = self.conv_norm_out(snake_case__ ) snake_case : Any = nn.silu(snake_case__ ) snake_case : Optional[int] = self.conv_out(snake_case__ ) snake_case : Union[str, Any] = jnp.transpose(snake_case__ , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=snake_case__ )
59
0
"""Entry point for the ``datasets-cli`` command-line tool."""
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    """Pair up leftover ``--key value`` tokens into a keyword-argument dict.

    ``unknown_args`` is the flat token list returned by
    ``ArgumentParser.parse_known_args()``; even positions are option names,
    odd positions their values.
    """
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    """Parse the command line, dispatch to the selected sub-command and run it."""
    # allow_abbrev=False: forbid ambiguous prefix matching so unknown args are
    # passed through verbatim to parse_unknown_args instead of being "guessed".
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args; `func` is set by the chosen sub-command's register_subcommand.
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
1
# Conversion factors to joules for each supported energy unit.
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def UpperCamelCase(from_type: str, to_type: str, value: float) -> float:
    """Convert ``value`` from one energy unit to another.

    Args:
        from_type: source unit, a key of ``ENERGY_CONVERSION``.
        to_type: target unit, a key of ``ENERGY_CONVERSION``.
        value: quantity expressed in ``from_type`` units.

    Returns:
        The quantity expressed in ``to_type`` units.

    Raises:
        ValueError: if either unit name is not in ``ENERGY_CONVERSION``.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    # Convert source -> joules -> target.
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
59
0
from ...configuration_utils import PretrainedConfig


class _SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration class for a BertGeneration model.

    Stores the hyper-parameters that define the model architecture; defaults
    reproduce the stock ``bert-generation`` configuration.
    """

    # Identifier consumed by the PretrainedConfig registration machinery.
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        # Special token ids are handled by the PretrainedConfig base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
19
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = None , ): snake_case : int = {} if train_file is not None: snake_case : List[Any] = [train_file] if eval_file is not None: snake_case : Optional[int] = [eval_file] if test_file is not None: snake_case : Any = [test_file] snake_case : int = datasets.load_dataset("csv" , data_files=__lowerCamelCase ) snake_case : str = list(ds[list(files.keys() )[0]].features.keys() ) snake_case : int = features_name.pop(__lowerCamelCase ) snake_case : str = list(set(ds[list(files.keys() )[0]][label_name] ) ) snake_case : str = {label: i for i, label in enumerate(__lowerCamelCase )} snake_case : List[Any] = tokenizer.model_input_names snake_case : List[Any] = {} if len(__lowerCamelCase ) == 1: for k in files.keys(): snake_case : Tuple = ds[k].map( lambda __lowerCamelCase : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=__lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" ) , batched=__lowerCamelCase , ) elif len(__lowerCamelCase ) == 2: for k in files.keys(): snake_case : List[Any] = ds[k].map( lambda __lowerCamelCase : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=__lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" , ) , batched=__lowerCamelCase , ) def gen_train(): for ex in 
transformed_ds[datasets.Split.TRAIN]: snake_case : Dict = {k: v for k, v in ex.items() if k in input_names} snake_case : Union[str, Any] = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: snake_case : str = {k: v for k, v in ex.items() if k in input_names} snake_case : Any = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: snake_case : str = {k: v for k, v in ex.items() if k in input_names} snake_case : List[str] = labelaid[ex[label_name]] yield (d, label) snake_case : int = ( tf.data.Dataset.from_generator( __lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: snake_case : Optional[Any] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) snake_case : Tuple = ( tf.data.Dataset.from_generator( __lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: snake_case : List[str] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) snake_case : Optional[int] = ( tf.data.Dataset.from_generator( __lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: snake_case : str = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid __lowerCamelCase = logging.getLogger(__name__) @dataclass class UpperCAmelCase : A__ : int = field(metadata={"help": "Which column contains the label"} ) A__ : str = field(default=A_ 
,metadata={"help": "The path of the training file"} ) A__ : Optional[str] = field(default=A_ ,metadata={"help": "The path of the development file"} ) A__ : Optional[str] = field(default=A_ ,metadata={"help": "The path of the test file"} ) A__ : int = field( default=1_28 ,metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } ,) A__ : bool = field( default=A_ ,metadata={"help": "Overwrite the cached training and evaluation sets"} ) @dataclass class UpperCAmelCase : A__ : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) A__ : Optional[str] = field( default=A_ ,metadata={"help": "Pretrained config name or path if not the same as model_name"} ) A__ : Optional[str] = field( default=A_ ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) A__ : bool = field(default=A_ ,metadata={"help": "Set this flag to use fast tokenization."} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. A__ : Optional[str] = field( default=A_ ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,) def UpperCamelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) snake_case , snake_case , snake_case : int = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. 
Use""" " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """ f"""16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. snake_case : Tuple = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case , snake_case , snake_case , snake_case : Tuple = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) snake_case : Optional[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__lowerCamelCase ) , labelaid=__lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): snake_case : int = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , ) def compute_metrics(__lowerCamelCase : EvalPrediction ) -> Dict: snake_case : Optional[int] = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer snake_case : int = TFTrainer( model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=__lowerCamelCase , 
eval_dataset=__lowerCamelCase , compute_metrics=__lowerCamelCase , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation snake_case : int = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) snake_case : Any = trainer.evaluate() snake_case : List[Any] = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(__lowerCamelCase , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(f""" {key} = {value}""" ) writer.write(f"""{key} = {value}\n""" ) results.update(__lowerCamelCase ) return results if __name__ == "__main__": main()
59
0
from __future__ import annotations


def UpperCAmelCase(a_) -> bool:
    """Return ``True`` when every element of ``a_`` is unique.

    Works for any hashable-element iterable; an empty input is trivially
    all-unique.
    """
    # A set drops duplicates, so equal lengths <=> no element repeats.
    return len(set(a_)) == len(a_)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class UpperCAmelCase ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE (self : Any ) -> List[str]: '''simple docstring''' snake_case : int = tempfile.mkdtemp() # fmt: off snake_case : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"] # fmt: on snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) snake_case : int = { "do_resize": True, "size": {"height": 18, "width": 18}, "do_normalize": True, "image_mean": [0.5, 0.5, 0.5], "image_std": [0.5, 0.5, 0.5], } snake_case : Optional[Any] = os.path.join(self.tmpdirname , snake_case__ ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Optional[Any] , **snake_case__ : str ) -> Optional[int]: '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Optional[Any] , **snake_case__ : List[str] ) -> int: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _SCREAMING_SNAKE_CASE (self : Dict ) -> str: '''simple docstring''' snake_case : 
List[Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] snake_case : Optional[int] = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' snake_case : Dict = self.get_tokenizer() snake_case : Optional[Any] = self.get_image_processor() snake_case : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) processor.save_pretrained(self.tmpdirname ) snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]: '''simple docstring''' snake_case : str = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case : Optional[int] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) snake_case : Tuple = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 ) snake_case : List[str] = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> int: '''simple docstring''' 
snake_case : str = self.get_image_processor() snake_case : Optional[int] = self.get_tokenizer() snake_case : List[Any] = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) snake_case : Optional[Any] = self.prepare_image_inputs() snake_case : str = image_processor(snake_case__ , return_tensors="np" ) snake_case : Any = processor(images=snake_case__ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]: '''simple docstring''' snake_case : Dict = self.get_image_processor() snake_case : int = self.get_tokenizer() snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) snake_case : Tuple = "lower newer" snake_case : Tuple = processor(text=snake_case__ ) snake_case : Union[str, Any] = tokenizer(snake_case__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _SCREAMING_SNAKE_CASE (self : Dict ) -> Optional[int]: '''simple docstring''' snake_case : List[Any] = self.get_image_processor() snake_case : Dict = self.get_tokenizer() snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) snake_case : int = "lower newer" snake_case : Dict = self.prepare_image_inputs() snake_case : Union[str, Any] = processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with self.assertRaises(snake_case__ ): processor() def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple: '''simple docstring''' snake_case : Tuple = self.get_image_processor() snake_case : Optional[Any] = self.get_tokenizer() snake_case : Tuple = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) 
snake_case : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] snake_case : List[Any] = processor.batch_decode(snake_case__ ) snake_case : Union[str, Any] = tokenizer.batch_decode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[str]: '''simple docstring''' snake_case : str = self.get_image_processor() snake_case : Union[str, Any] = self.get_tokenizer() snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) snake_case : Optional[Any] = "lower newer" snake_case : List[Any] = self.prepare_image_inputs() snake_case : Tuple = processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
59
0
"""simple docstring""" import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase__ ( A_ ): """simple docstring""" __a = (DDIMParallelScheduler,) __a = (("eta", 0.0), ("num_inference_steps", 50)) def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Any = { "num_train_timesteps": 1_000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "clip_sample": True, } config.update(**snake_case__ ) return config def lowerCamelCase__ ( self : Dict , **UpperCamelCase : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.scheduler_classes[0] __UpperCAmelCase : Any = self.get_scheduler_config(**snake_case__ ) __UpperCAmelCase : Any = scheduler_class(**snake_case__ ) __UpperCAmelCase : Union[str, Any] = 10, 0.0 __UpperCAmelCase : List[Any] = self.dummy_model() __UpperCAmelCase : Any = self.dummy_sample_deter scheduler.set_timesteps(snake_case__ ) for t in scheduler.timesteps: __UpperCAmelCase : Optional[int] = model(snake_case__ , snake_case__ ) __UpperCAmelCase : List[str] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , snake_case__ ).prev_sample return sample def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for timesteps in [100, 500, 1_000]: self.check_over_configs(num_train_timesteps=snake_case__ ) def lowerCamelCase__ ( self : str ): '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=snake_case__ ) __UpperCAmelCase : Optional[int] = self.scheduler_classes[0] __UpperCAmelCase : Optional[int] = self.get_scheduler_config(steps_offset=1 ) __UpperCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) ) def lowerCamelCase__ ( self : int ): '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , 
[0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=snake_case__ ) def lowerCamelCase__ ( self : str ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=snake_case__ ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=snake_case__ ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=snake_case__ ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=snake_case__ ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' self.check_over_configs(thresholding=snake_case__ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' for t in [1, 10, 49]: self.check_over_forward(time_step=snake_case__ ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ): self.check_over_forward(time_step=snake_case__ , num_inference_steps=snake_case__ ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=snake_case__ , eta=snake_case__ ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Dict = self.scheduler_classes[0] __UpperCAmelCase : Tuple = self.get_scheduler_config() __UpperCAmelCase : 
Dict = scheduler_class(**snake_case__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14771 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32460 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5 def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0] __UpperCAmelCase : List[Any] = self.get_scheduler_config() __UpperCAmelCase : int = scheduler_class(**snake_case__ ) __UpperCAmelCase : Any = 10, 0.0 scheduler.set_timesteps(snake_case__ ) __UpperCAmelCase : Optional[Any] = self.dummy_model() __UpperCAmelCase : str = self.dummy_sample_deter __UpperCAmelCase : Dict = self.dummy_sample_deter + 0.1 __UpperCAmelCase : Dict = self.dummy_sample_deter - 0.1 __UpperCAmelCase : Optional[Any] = samplea.shape[0] __UpperCAmelCase : str = torch.stack([samplea, samplea, samplea] , dim=0 ) __UpperCAmelCase : Tuple = torch.arange(snake_case__ )[0:3, None].repeat(1 , snake_case__ ) __UpperCAmelCase : Tuple = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) __UpperCAmelCase : List[str] = scheduler.batch_step_no_noise(snake_case__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , snake_case__ ) __UpperCAmelCase : Dict = torch.sum(torch.abs(snake_case__ ) ) __UpperCAmelCase : List[Any] = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 1147.7904 ) < 1e-2 assert abs(result_mean.item() - 0.4982 ) < 1e-3 def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.full_loop() __UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(snake_case__ ) ) __UpperCAmelCase : List[Any] = torch.mean(torch.abs(snake_case__ ) 
) assert abs(result_sum.item() - 172.0067 ) < 1e-2 assert abs(result_mean.item() - 0.223967 ) < 1e-3 def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Dict = self.full_loop(prediction_type="""v_prediction""" ) __UpperCAmelCase : int = torch.sum(torch.abs(snake_case__ ) ) __UpperCAmelCase : Optional[int] = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 52.5302 ) < 1e-2 assert abs(result_mean.item() - 0.0684 ) < 1e-3 def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Dict = self.full_loop(set_alpha_to_one=snake_case__ , beta_start=0.01 ) __UpperCAmelCase : str = torch.sum(torch.abs(snake_case__ ) ) __UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 149.8295 ) < 1e-2 assert abs(result_mean.item() - 0.1951 ) < 1e-3 def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : int = self.full_loop(set_alpha_to_one=snake_case__ , beta_start=0.01 ) __UpperCAmelCase : Tuple = torch.sum(torch.abs(snake_case__ ) ) __UpperCAmelCase : List[Any] = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 149.0784 ) < 1e-2 assert abs(result_mean.item() - 0.1941 ) < 1e-3
115
"""BioGPT package init: lazily exposes the config, tokenizer and (when torch
is installed) the modeling classes without importing torch eagerly."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Map of submodule name -> public names it provides; consumed by _LazyModule.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply skip registering the torch-backed modeling classes.
    pass
else:
    # BUG FIX: the previous version bound this list to a throwaway module-level
    # name, so the modeling classes were never registered, and the
    # `_import_structure` referenced below was undefined (NameError on import).
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # BUG FIX: the lazy proxy was assigned to a throwaway name; it must replace
    # this module in sys.modules so attribute access triggers lazy imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
59
0
'''simple docstring''' from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo __lowerCAmelCase : Any ="\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" __lowerCAmelCase : Dict ="\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. 
According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" __lowerCAmelCase : Union[str, Any] ="\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 
'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): def UpperCAmelCase_ ( self :Optional[Any] )-> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def UpperCAmelCase_ ( self :int , lowercase_ :List[List[List[str]]] , lowercase_ :List[List[str]] , lowercase_ :int = 1 , lowercase_ :int = 4 , )-> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=snake_case__ , hypotheses=snake_case__ , min_len=snake_case__ , max_len=snake_case__ ) }
237
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase : def __init__(self : Dict , snake_case__ : Dict , snake_case__ : Any=13 , snake_case__ : Any=32 , snake_case__ : Optional[Any]=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : List[Any]=16 , snake_case__ : int=[1, 2, 1] , snake_case__ : Dict=[2, 2, 4] , snake_case__ : Dict=2 , snake_case__ : Tuple=2.0 , snake_case__ : Optional[int]=True , snake_case__ : Union[str, Any]=0.0 , snake_case__ : Any=0.0 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int="gelu" , snake_case__ : Optional[int]=False , snake_case__ : List[Any]=True , snake_case__ : List[str]=0.02 , snake_case__ : int=1e-5 , snake_case__ : List[str]=True , snake_case__ : Union[str, Any]=None , snake_case__ : List[Any]=True , snake_case__ : Optional[Any]=10 , snake_case__ : Optional[Any]=8 , snake_case__ : Any=["stage1", "stage2", "stage3"] , snake_case__ : Tuple=[1, 2, 3] , ) -> Union[str, Any]: '''simple docstring''' snake_case : Any = parent snake_case : Optional[int] = batch_size snake_case : Union[str, Any] = image_size snake_case : Dict = patch_size snake_case : Optional[Any] = num_channels snake_case : Union[str, Any] = embed_dim snake_case : int = depths snake_case : List[str] = num_heads snake_case : Union[str, Any] = window_size snake_case : Union[str, Any] = mlp_ratio snake_case : 
List[Any] = qkv_bias snake_case : List[Any] = hidden_dropout_prob snake_case : Union[str, Any] = attention_probs_dropout_prob snake_case : Union[str, Any] = drop_path_rate snake_case : int = hidden_act snake_case : Optional[int] = use_absolute_embeddings snake_case : int = patch_norm snake_case : Union[str, Any] = layer_norm_eps snake_case : Any = initializer_range snake_case : Optional[Any] = is_training snake_case : Tuple = scope snake_case : Optional[int] = use_labels snake_case : Optional[Any] = type_sequence_label_size snake_case : Union[str, Any] = encoder_stride snake_case : Any = out_features snake_case : Tuple = out_indices def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict: '''simple docstring''' snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case : int = None if self.use_labels: snake_case : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case : Dict = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE (self : List[str] ) -> int: '''simple docstring''' return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Tuple ) -> Optional[Any]: '''simple docstring''' 
snake_case : Union[str, Any] = MaskFormerSwinModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() snake_case : List[Any] = model(snake_case__ ) snake_case : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case : int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] ) -> str: '''simple docstring''' snake_case : Optional[int] = MaskFormerSwinBackbone(config=snake_case__ ) model.to(snake_case__ ) model.eval() snake_case : List[Any] = model(snake_case__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(snake_case__ ): snake_case : Tuple = ["stem"] snake_case : List[Any] = MaskFormerSwinBackbone(config=snake_case__ ) def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]: '''simple docstring''' snake_case : Union[str, Any] = self.prepare_config_and_inputs() snake_case , snake_case , snake_case : List[Any] = config_and_inputs snake_case : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase ( A_ ,A_ ,unittest.TestCase ): A__ : List[str] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) A__ : str = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} A__ : Optional[Any] = False A__ : List[Any] = False A__ : List[str] = False A__ : List[str] = False A__ : Union[str, Any] = False def _SCREAMING_SNAKE_CASE (self : 
Optional[int] ) -> List[str]: '''simple docstring''' snake_case : str = MaskFormerSwinModelTester(self ) snake_case : Optional[int] = ConfigTester(self , config_class=snake_case__ , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" ) ) def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[Any]: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE (self : Tuple ) -> List[Any]: '''simple docstring''' return def _SCREAMING_SNAKE_CASE (self : Dict ) -> str: '''simple docstring''' snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _SCREAMING_SNAKE_CASE (self : int ) -> Dict: '''simple docstring''' snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*snake_case__ ) @unittest.skip("Swin does not use inputs_embeds" ) def _SCREAMING_SNAKE_CASE (self : int ) -> Any: '''simple docstring''' pass @unittest.skip("Swin does not support feedforward chunking" ) def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Dict: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[str]: '''simple docstring''' snake_case , snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case : int = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() 
, (nn.Module) ) snake_case : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict: '''simple docstring''' snake_case , snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case : str = model_class(snake_case__ ) snake_case : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case : Optional[Any] = [*signature.parameters.keys()] snake_case : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" ) def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> str: '''simple docstring''' pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" ) def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Any: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Tuple ) -> Optional[int]: '''simple docstring''' snake_case : Tuple = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): snake_case : Any = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) snake_case : int = outputs.hidden_states snake_case : Union[str, Any] = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(snake_case__ ) , snake_case__ ) # Swin has a different seq_length snake_case : Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, 
self.model_tester.embed_dim] , ) def _SCREAMING_SNAKE_CASE (self : Dict ) -> Union[str, Any]: '''simple docstring''' snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case : int = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case : int = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case : Dict = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE (self : int ) -> Any: '''simple docstring''' snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case : Any = 3 snake_case : List[str] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case : str = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case : Optional[Any] = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , (padded_height, padded_width) ) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" ) def _SCREAMING_SNAKE_CASE 
(self : Optional[int] ) -> str: '''simple docstring''' pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def _SCREAMING_SNAKE_CASE (self : str ) -> int: '''simple docstring''' pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def _SCREAMING_SNAKE_CASE (self : int ) -> str: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : Any ) -> Any: '''simple docstring''' snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(snake_case__ : Union[str, Any] ): snake_case : Any = 0 return t def check_equivalence(snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[int]={} ): with torch.no_grad(): snake_case : Optional[Any] = model(**snake_case__ , return_dict=snake_case__ , **snake_case__ ) snake_case : Tuple = model(**snake_case__ , return_dict=snake_case__ , **snake_case__ ).to_tuple() def recursive_check(snake_case__ : List[str] , snake_case__ : Optional[Any] ): if isinstance(snake_case__ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(snake_case__ , snake_case__ ): recursive_check(snake_case__ , snake_case__ ) elif isinstance(snake_case__ , snake_case__ ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(snake_case__ , snake_case__ ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(snake_case__ ) , set_nan_tensor_to_zero(snake_case__ ) , atol=1e-5 ) , msg=( "Tuple and dict output are not equal. Difference:" f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" f""" {torch.isnan(snake_case__ ).any()} and `inf`: {torch.isinf(snake_case__ )}. 
Dict has""" f""" `nan`: {torch.isnan(snake_case__ ).any()} and `inf`: {torch.isinf(snake_case__ )}.""" ) , ) recursive_check(snake_case__ , snake_case__ ) for model_class in self.all_model_classes: snake_case : Optional[int] = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() snake_case : Union[str, Any] = self._prepare_for_class(snake_case__ , snake_case__ ) snake_case : Tuple = self._prepare_for_class(snake_case__ , snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ ) snake_case : Tuple = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) snake_case : Optional[Any] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ ) snake_case : Dict = self._prepare_for_class(snake_case__ , snake_case__ ) snake_case : List[Any] = self._prepare_for_class(snake_case__ , snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ , {"output_hidden_states": True} ) snake_case : Any = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) snake_case : List[str] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ , {"output_hidden_states": True} ) @require_torch class UpperCAmelCase ( unittest.TestCase ,A_ ): A__ : int = (MaskFormerSwinBackbone,) if is_torch_available() else () A__ : int = MaskFormerSwinConfig def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Any: '''simple docstring''' snake_case : Union[str, Any] = MaskFormerSwinModelTester(self ) def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[Any]: '''simple docstring''' snake_case , snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() snake_case : Optional[int] = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: snake_case : Optional[int] = 
backbone_class(snake_case__ ) backbone.to(snake_case__ ) backbone.eval() snake_case : Union[str, Any] = backbone(**snake_case__ ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , snake_case__ ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True snake_case : Optional[int] = backbone(**snake_case__ , output_hidden_states=snake_case__ ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) snake_case , snake_case , snake_case : Dict = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: snake_case : Optional[Any] = backbone(**snake_case__ , output_attentions=snake_case__ ) self.assertIsNotNone(outputs.attentions )
59
0
"""Constant (histogram) stretching for grayscale images.

Reads a grayscale image, builds its intensity histogram, derives a per-level
remapping table from the cumulative distribution, rewrites every pixel through
that table, and saves the result to ``output_data/output.jpg``.
"""
import copy
import os

# BUG FIX: the OpenCV Python binding is `cv2`; the previous `import cva`
# referenced a nonexistent module and every call below failed at import time.
import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        # Stretched image (becomes a numpy array once `stretch` has run).
        self.img = ""
        # Untouched deep copy of the input, kept for side-by-side display.
        self.original_image = ""
        # Remapping table: last_list[old_intensity] -> new_intensity.
        self.last_list = []
        self.rem = 0
        self.L = 256  # number of gray levels
        self.sk = 0  # running cumulative-distribution value
        self.k = 0  # total histogram mass (pixel count)
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        """Stretch the grayscale image at ``input_image`` and save the result."""
        self.img = cv2.imread(input_image, 0)  # flag 0 -> load as grayscale
        self.original_image = copy.deepcopy(self.img)
        # plt.hist returns (counts, bin_edges, patches); only counts are used.
        # BUG FIX: the previous version dropped this unpacking, so `x` below
        # was an undefined name.
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                # NOTE(review): `last % last` is always 0 for nonzero `last`;
                # preserved as-is from the original — confirm intended formula.
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        # Rewrite each pixel through the remapping table.
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        """Plot the histogram of the (stretched) image."""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        """Show input and output images for five seconds, then close windows."""
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5_000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    # BUG FIX: the class definition and this instantiation previously used
    # different names, so running the script raised NameError immediately.
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
43
from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def UpperCamelCase ( __lowerCamelCase : Dict[str, torch.Tensor] ): snake_case : List[str] = [] snake_case : Optional[int] = [] snake_case : Any = [] for rt in rc.restypes: snake_case : List[Any] = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) snake_case : str = {name: i for i, name in enumerate(__lowerCamelCase )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14 ) restype_atomaa_to_atomaa_list.append([0] * 37 ) restype_atomaa_mask_list.append([0.0] * 14 ) snake_case : Optional[Any] = torch.tensor( __lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , ) snake_case : List[Any] = torch.tensor( __lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , ) snake_case : int = torch.tensor( __lowerCamelCase , dtype=torch.floataa , device=protein["aatype"].device , ) snake_case : int = protein["aatype"].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. 
an array # with shape (num_res, 14) containing the atom37 indices for this protein snake_case : List[Any] = restype_atomaa_to_atomaa[protein_aatype] snake_case : str = restype_atomaa_mask[protein_aatype] snake_case : str = residx_atomaa_mask snake_case : Any = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back snake_case : List[str] = restype_atomaa_to_atomaa[protein_aatype] snake_case : List[Any] = residx_atomaa_to_atomaa.long() # create the corresponding mask snake_case : Union[str, Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["aatype"].device ) for restype, restype_letter in enumerate(rc.restypes ): snake_case : Optional[int] = rc.restype_atoa[restype_letter] snake_case : Any = rc.residue_atoms[restype_name] for atom_name in atom_names: snake_case : List[Any] = rc.atom_order[atom_name] snake_case : Optional[Any] = 1 snake_case : List[Any] = restype_atomaa_mask[protein_aatype] snake_case : int = residx_atomaa_mask return protein def UpperCamelCase ( __lowerCamelCase : Dict[str, torch.Tensor] ): snake_case : Dict = tree_map(lambda __lowerCamelCase : torch.tensor(__lowerCamelCase , device=batch["aatype"].device ) , __lowerCamelCase , np.ndarray ) snake_case : List[str] = tensor_tree_map(lambda __lowerCamelCase : np.array(__lowerCamelCase ) , make_atomaa_masks(__lowerCamelCase ) ) return out
59
0
"""simple docstring""" import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def snake_case__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Dict="shi-labs/oneformer_demo" ): """simple docstring""" with open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) as f: lowerCamelCase__ : Optional[int] =json.load(__lowerCamelCase ) lowerCamelCase__ : Optional[int] ={} lowerCamelCase__ : Any =[] lowerCamelCase__ : List[str] =[] for key, info in class_info.items(): lowerCamelCase__ : Union[str, Any] =info["name"] class_names.append(info['''name'''] ) if info["isthing"]: thing_ids.append(int(__lowerCamelCase ) ) lowerCamelCase__ : List[Any] =thing_ids lowerCamelCase__ : Optional[Any] =class_names return metadata class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __init__( self : Union[str, Any], lowerCamelCase : Dict, lowerCamelCase : Optional[Any]=7, lowerCamelCase : str=3, lowerCamelCase : Optional[Any]=30, lowerCamelCase : Union[str, Any]=400, lowerCamelCase : List[Any]=None, lowerCamelCase : Any=True, lowerCamelCase : Optional[int]=True, lowerCamelCase : str=[0.5, 0.5, 0.5], lowerCamelCase : Optional[int]=[0.5, 0.5, 0.5], lowerCamelCase : List[str]=10, lowerCamelCase : int=False, lowerCamelCase : str=255, lowerCamelCase : List[Any]="shi-labs/oneformer_demo", lowerCamelCase : 
Optional[Any]="ade20k_panoptic.json", lowerCamelCase : str=10, )-> Tuple: lowerCamelCase__ : int =parent lowerCamelCase__ : List[Any] =batch_size lowerCamelCase__ : str =num_channels lowerCamelCase__ : Dict =min_resolution lowerCamelCase__ : int =max_resolution lowerCamelCase__ : Dict =do_resize lowerCamelCase__ : Union[str, Any] ={"shortest_edge": 32, "longest_edge": 1333} if size is None else size lowerCamelCase__ : Optional[int] =do_normalize lowerCamelCase__ : Optional[int] =image_mean lowerCamelCase__ : Dict =image_std lowerCamelCase__ : List[str] =class_info_file lowerCamelCase__ : int =prepare_metadata(snake_case__, snake_case__ ) lowerCamelCase__ : Optional[int] =num_text lowerCamelCase__ : Any =repo_path # for the post_process_functions lowerCamelCase__ : Optional[int] =2 lowerCamelCase__ : Optional[int] =10 lowerCamelCase__ : List[str] =10 lowerCamelCase__ : Optional[int] =3 lowerCamelCase__ : Union[str, Any] =4 lowerCamelCase__ : str =num_labels lowerCamelCase__ : Optional[Any] =do_reduce_labels lowerCamelCase__ : int =ignore_index def snake_case ( self : Optional[int] )-> Dict: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def snake_case ( self : List[str], lowerCamelCase : str, lowerCamelCase : Optional[int]=False )-> Tuple: if not batched: lowerCamelCase__ : Union[str, Any] =image_inputs[0] if isinstance(snake_case__, Image.Image ): lowerCamelCase__ : List[Any] =image.size else: lowerCamelCase__ : Dict =image.shape[1], image.shape[2] if w < h: lowerCamelCase__ : int =int(self.size['''shortest_edge'''] * h / w ) lowerCamelCase__ : str =self.size["shortest_edge"] elif w > h: lowerCamelCase__ : int =self.size["shortest_edge"] lowerCamelCase__ : 
List[str] =int(self.size['''shortest_edge'''] * w / h ) else: lowerCamelCase__ : Dict =self.size["shortest_edge"] lowerCamelCase__ : Optional[Any] =self.size["shortest_edge"] else: lowerCamelCase__ : List[Any] =[] for image in image_inputs: lowerCamelCase__ : List[Any] =self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCamelCase__ : Union[str, Any] =max(snake_case__, key=lambda lowerCamelCase : item[0] )[0] lowerCamelCase__ : int =max(snake_case__, key=lambda lowerCamelCase : item[1] )[1] return expected_height, expected_width def snake_case ( self : Any )-> List[Any]: return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ), masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ), ) @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( A_ , unittest.TestCase ): '''simple docstring''' _a = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string _a = image_processing_class def snake_case ( self : Optional[int] )-> Tuple: lowerCamelCase__ : Tuple =OneFormerImageProcessorTester(self ) @property def snake_case ( self : Optional[int] )-> Optional[int]: return self.image_processing_tester.prepare_image_processor_dict() def snake_case ( self : Optional[Any] )-> List[str]: lowerCamelCase__ : str =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__, '''image_mean''' ) ) self.assertTrue(hasattr(snake_case__, '''image_std''' ) ) self.assertTrue(hasattr(snake_case__, '''do_normalize''' ) ) self.assertTrue(hasattr(snake_case__, '''do_resize''' ) ) self.assertTrue(hasattr(snake_case__, '''size''' ) ) self.assertTrue(hasattr(snake_case__, '''ignore_index''' ) ) self.assertTrue(hasattr(snake_case__, '''class_info_file''' ) ) 
self.assertTrue(hasattr(snake_case__, '''num_text''' ) ) self.assertTrue(hasattr(snake_case__, '''repo_path''' ) ) self.assertTrue(hasattr(snake_case__, '''metadata''' ) ) self.assertTrue(hasattr(snake_case__, '''do_reduce_labels''' ) ) def snake_case ( self : Optional[int] )-> str: pass def snake_case ( self : Dict )-> Any: lowerCamelCase__ : Optional[int] =self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ : Dict =prepare_image_inputs(self.image_processing_tester, equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__, Image.Image ) # Test not batched input lowerCamelCase__ : Optional[int] =image_processor(image_inputs[0], ['''semantic'''], return_tensors='''pt''' ).pixel_values lowerCamelCase__ : Optional[Any] =self.image_processing_tester.get_expected_values(snake_case__ ) self.assertEqual( encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), ) # Test batched lowerCamelCase__ : Optional[int] =self.image_processing_tester.get_expected_values(snake_case__, batched=snake_case__ ) lowerCamelCase__ : str =image_processor( snake_case__, ['''semantic'''] * len(snake_case__ ), return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ), ) def snake_case ( self : Union[str, Any] )-> Tuple: lowerCamelCase__ : Union[str, Any] =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ : List[str] =prepare_image_inputs(self.image_processing_tester, equal_resolution=snake_case__, numpify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__, np.ndarray ) # Test not batched input lowerCamelCase__ : Union[str, Any] =image_processor(image_inputs[0], ['''semantic'''], return_tensors='''pt''' ).pixel_values lowerCamelCase__ : int 
=self.image_processing_tester.get_expected_values(snake_case__ ) self.assertEqual( encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), ) # Test batched lowerCamelCase__ : Union[str, Any] =self.image_processing_tester.get_expected_values(snake_case__, batched=snake_case__ ) lowerCamelCase__ : Optional[int] =image_processor( snake_case__, ['''semantic'''] * len(snake_case__ ), return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ), ) def snake_case ( self : Union[str, Any] )-> List[Any]: lowerCamelCase__ : Union[str, Any] =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ : Any =prepare_image_inputs(self.image_processing_tester, equal_resolution=snake_case__, torchify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__, torch.Tensor ) # Test not batched input lowerCamelCase__ : Any =image_processor(image_inputs[0], ['''semantic'''], return_tensors='''pt''' ).pixel_values lowerCamelCase__ : Union[str, Any] =self.image_processing_tester.get_expected_values(snake_case__ ) self.assertEqual( encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), ) # Test batched lowerCamelCase__ : Optional[int] =self.image_processing_tester.get_expected_values(snake_case__, batched=snake_case__ ) lowerCamelCase__ : List[str] =image_processor( snake_case__, ['''semantic'''] * len(snake_case__ ), return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ), ) def snake_case ( self : Dict, lowerCamelCase : Dict=False, lowerCamelCase : Union[str, Any]=False, lowerCamelCase : str="np" )-> Optional[int]: lowerCamelCase__ : Optional[int] 
=self.image_processing_class(**self.image_processor_dict ) # prepare image and target lowerCamelCase__ : int =self.image_processing_tester.num_labels lowerCamelCase__ : Tuple =None lowerCamelCase__ : List[str] =None lowerCamelCase__ : Optional[int] =prepare_image_inputs(self.image_processing_tester, equal_resolution=snake_case__ ) if with_segmentation_maps: lowerCamelCase__ : List[str] =num_labels if is_instance_map: lowerCamelCase__ : int =list(range(snake_case__ ) ) * 2 lowerCamelCase__ : Union[str, Any] =dict(enumerate(snake_case__ ) ) lowerCamelCase__ : Optional[Any] =[ np.random.randint(0, high * 2, (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": lowerCamelCase__ : List[str] =[Image.fromarray(snake_case__ ) for annotation in annotations] lowerCamelCase__ : Dict =image_processor( snake_case__, ['''semantic'''] * len(snake_case__ ), snake_case__, return_tensors='''pt''', instance_id_to_semantic_id=snake_case__, pad_and_return_pixel_mask=snake_case__, ) return inputs def snake_case ( self : int )-> Dict: pass def snake_case ( self : Any )-> Dict: def common(lowerCamelCase : List[str]=False, lowerCamelCase : Optional[Any]=None ): lowerCamelCase__ : Dict =self.comm_get_image_processor_inputs( with_segmentation_maps=snake_case__, is_instance_map=snake_case__, segmentation_type=snake_case__ ) lowerCamelCase__ : List[str] =inputs["mask_labels"] lowerCamelCase__ : Tuple =inputs["class_labels"] lowerCamelCase__ : Dict =inputs["pixel_values"] lowerCamelCase__ : Tuple =inputs["text_inputs"] # check the batch_size for mask_label, class_label, text_input in zip(snake_case__, snake_case__, snake_case__ ): self.assertEqual(mask_label.shape[0], class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:] ) self.assertEqual(len(snake_case__ ), self.image_processing_tester.num_text ) common() common(is_instance_map=snake_case__ ) common(is_instance_map=snake_case__, 
segmentation_type='''pil''' ) common(is_instance_map=snake_case__, segmentation_type='''pil''' ) def snake_case ( self : List[Any] )-> Union[str, Any]: lowerCamelCase__ : Any =np.zeros((20, 50) ) lowerCamelCase__ : List[str] =1 lowerCamelCase__ : List[str] =1 lowerCamelCase__ : Optional[Any] =1 lowerCamelCase__ : int =binary_mask_to_rle(snake_case__ ) self.assertEqual(len(snake_case__ ), 4 ) self.assertEqual(rle[0], 21 ) self.assertEqual(rle[1], 45 ) def snake_case ( self : Optional[Any] )-> int: lowerCamelCase__ : int =self.image_processing_class( num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='''ade20k_panoptic.json''', num_text=self.image_processing_tester.num_text, repo_path='''shi-labs/oneformer_demo''', ) lowerCamelCase__ : Optional[int] =self.image_processing_tester.get_fake_oneformer_outputs() lowerCamelCase__ : Tuple =fature_extractor.post_process_semantic_segmentation(snake_case__ ) self.assertEqual(len(snake_case__ ), self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape, ( self.image_processing_tester.height, self.image_processing_tester.width, ), ) lowerCamelCase__ : Union[str, Any] =[(1, 4) for i in range(self.image_processing_tester.batch_size )] lowerCamelCase__ : Optional[int] =fature_extractor.post_process_semantic_segmentation(snake_case__, target_sizes=snake_case__ ) self.assertEqual(segmentation[0].shape, target_sizes[0] ) def snake_case ( self : Tuple )-> List[str]: lowerCamelCase__ : str =self.image_processing_class( num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='''ade20k_panoptic.json''', num_text=self.image_processing_tester.num_text, repo_path='''shi-labs/oneformer_demo''', ) lowerCamelCase__ : int =self.image_processing_tester.get_fake_oneformer_outputs() lowerCamelCase__ : List[str] =image_processor.post_process_instance_segmentation(snake_case__, threshold=0 ) 
self.assertTrue(len(snake_case__ ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue('''segmentation''' in el ) self.assertTrue('''segments_info''' in el ) self.assertEqual(type(el['''segments_info'''] ), snake_case__ ) self.assertEqual( el['''segmentation'''].shape, (self.image_processing_tester.height, self.image_processing_tester.width) ) def snake_case ( self : str )-> str: lowerCamelCase__ : Tuple =self.image_processing_class( num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='''ade20k_panoptic.json''', num_text=self.image_processing_tester.num_text, repo_path='''shi-labs/oneformer_demo''', ) lowerCamelCase__ : Dict =self.image_processing_tester.get_fake_oneformer_outputs() lowerCamelCase__ : List[Any] =image_processor.post_process_panoptic_segmentation(snake_case__, threshold=0 ) self.assertTrue(len(snake_case__ ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue('''segmentation''' in el ) self.assertTrue('''segments_info''' in el ) self.assertEqual(type(el['''segments_info'''] ), snake_case__ ) self.assertEqual( el['''segmentation'''].shape, (self.image_processing_tester.height, self.image_processing_tester.width) )
238
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer __lowerCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} __lowerCamelCase = { """vocab_file""": { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""", }, """tokenizer_file""": { """unc-nlp/lxmert-base-uncased""": ( """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json""" ), }, } __lowerCamelCase = { """unc-nlp/lxmert-base-uncased""": 5_12, } __lowerCamelCase = { """unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True}, } class UpperCAmelCase ( A_ ): A__ : Any = VOCAB_FILES_NAMES A__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP A__ : Tuple = PRETRAINED_INIT_CONFIGURATION A__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : List[Any] = LxmertTokenizer def __init__(self : Dict , snake_case__ : Tuple=None , snake_case__ : Optional[Any]=None , snake_case__ : Optional[Any]=True , snake_case__ : Tuple="[UNK]" , snake_case__ : Optional[Any]="[SEP]" , snake_case__ : Optional[Any]="[PAD]" , snake_case__ : List[Any]="[CLS]" , snake_case__ : Tuple="[MASK]" , snake_case__ : Dict=True , snake_case__ : Union[str, Any]=None , **snake_case__ : Dict , ) -> Optional[int]: '''simple docstring''' super().__init__( snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , **snake_case__ , ) snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , snake_case__ ) != do_lower_case or normalizer_state.get("strip_accents" , snake_case__ ) != strip_accents or 
normalizer_state.get("handle_chinese_chars" , snake_case__ ) != tokenize_chinese_chars ): snake_case : Union[str, Any] = getattr(snake_case__ , normalizer_state.pop("type" ) ) snake_case : str = do_lower_case snake_case : List[Any] = strip_accents snake_case : Optional[int] = tokenize_chinese_chars snake_case : int = normalizer_class(**snake_case__ ) snake_case : Optional[Any] = do_lower_case def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=None ) -> Any: '''simple docstring''' snake_case : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' snake_case : Optional[Any] = [self.sep_token_id] snake_case : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' snake_case : List[Any] = self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ )
59
0
import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> Optional[int]: SCREAMING_SNAKE_CASE_ = tmp_path / "file.csv" SCREAMING_SNAKE_CASE_ = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20\n ' ) with open(__lowerCamelCase , 'w' ) as f: f.write(__lowerCamelCase ) return str(__lowerCamelCase ) @pytest.fixture def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] ) -> List[Any]: SCREAMING_SNAKE_CASE_ = tmp_path / "malformed_file.csv" SCREAMING_SNAKE_CASE_ = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20,\n ' ) with open(__lowerCamelCase , 'w' ) as f: f.write(__lowerCamelCase ) return str(__lowerCamelCase ) @pytest.fixture def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] ) -> Any: SCREAMING_SNAKE_CASE_ = tmp_path / "csv_with_image.csv" SCREAMING_SNAKE_CASE_ = textwrap.dedent( f"\\n image\n {image_file}\n " ) with open(__lowerCamelCase , 'w' ) as f: f.write(__lowerCamelCase ) return str(__lowerCamelCase ) @pytest.fixture def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> List[Any]: SCREAMING_SNAKE_CASE_ = tmp_path / "csv_with_label.csv" SCREAMING_SNAKE_CASE_ = textwrap.dedent( '\\n label\n good\n bad\n good\n ' ) with open(__lowerCamelCase , 'w' ) as f: f.write(__lowerCamelCase ) return str(__lowerCamelCase ) @pytest.fixture def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> int: SCREAMING_SNAKE_CASE_ = tmp_path / "csv_with_int_list.csv" SCREAMING_SNAKE_CASE_ = textwrap.dedent( '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' ) with open(__lowerCamelCase , 'w' ) as f: f.write(__lowerCamelCase ) return str(__lowerCamelCase ) def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str ) -> Optional[int]: SCREAMING_SNAKE_CASE_ = Csv() SCREAMING_SNAKE_CASE_ = 
csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(__lowerCamelCase , match='Error tokenizing data' ): for _ in generator: pass assert any( record.levelname == 'ERROR' and 'Failed to read file' in record.message and os.path.basename(__lowerCamelCase ) in record.message for record in caplog.records ) @require_pil def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] ) -> str: with open(__lowerCamelCase , encoding='utf-8' ) as f: SCREAMING_SNAKE_CASE_ = f.read().splitlines()[1] SCREAMING_SNAKE_CASE_ = Csv(encoding='utf-8' , features=Features({'image': Image()} ) ) SCREAMING_SNAKE_CASE_ = csv._generate_tables([[csv_file_with_image]] ) SCREAMING_SNAKE_CASE_ = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('image' ).type == Image()() SCREAMING_SNAKE_CASE_ = pa_table.to_pydict()["image"] assert generated_content == [{"path": image_file, "bytes": None}] def UpperCAmelCase_ ( __UpperCAmelCase : Any ) -> Union[str, Any]: with open(__lowerCamelCase , encoding='utf-8' ) as f: SCREAMING_SNAKE_CASE_ = f.read().splitlines()[1:] SCREAMING_SNAKE_CASE_ = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) ) SCREAMING_SNAKE_CASE_ = csv._generate_tables([[csv_file_with_label]] ) SCREAMING_SNAKE_CASE_ = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )() SCREAMING_SNAKE_CASE_ = pa_table.to_pydict()["label"] assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(__lowerCamelCase ) for label in labels] def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> Optional[Any]: SCREAMING_SNAKE_CASE_ = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda __UpperCAmelCase : [int(__lowerCamelCase ) for i in x.split()]} ) SCREAMING_SNAKE_CASE_ = csv._generate_tables([[csv_file_with_int_list]] ) SCREAMING_SNAKE_CASE_ = pa.concat_tables([table for _, table in generator] ) assert 
pa.types.is_list(pa_table.schema.field('int_list' ).type ) SCREAMING_SNAKE_CASE_ = pa_table.to_pydict()["int_list"] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
225
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class UpperCAmelCase(SchedulerCommonTest):
    """Unit tests for ``DDIMParallelScheduler``.

    Relies on the ``SchedulerCommonTest`` machinery (``check_over_configs``,
    ``check_over_forward``, ``dummy_model``, ``dummy_sample_deter``), which reads
    the two class attributes below.
    """

    # The common-test harness instantiates every class listed here.
    scheduler_classes = (DDIMParallelScheduler,)
    # Default kwargs forwarded to scheduler.step() by check_over_forward().
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config; any kwarg overrides a default."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        """Run a deterministic 10-step sampling loop and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        # With steps_offset=1 the 5-step schedule must be shifted by one.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Reference values for sigma_t at selected (t, prev_t) pairs.
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        """batch_step_no_noise over three stacked samples must match references."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample_a = self.dummy_sample_deter
        sample_b = self.dummy_sample_deter + 0.1
        sample_c = self.dummy_sample_deter - 0.1

        per_sample_batch = sample_a.shape[0]
        samples = torch.stack([sample_a, sample_b, sample_c], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(
            residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta
        )

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # A different beta_start so the first alpha differs from the default run.
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
59
0
"""simple docstring"""

from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of *function* on the interval [a, b] by bisection.

    Requires that *function* changes sign on [a, b] (or that an endpoint is
    already a root).  Halves the bracketing interval until its width is below
    1e-7 and returns the midpoint.

    :param function: continuous function of one variable
    :param a: left end of the bracketing interval
    :param b: right end of the bracketing interval
    :return: an approximation of a root, accurate to about 1e-7
    :raises ValueError: if function(a) and function(b) have the same sign
        (no sign change means bisection cannot locate a root)
    """
    start = a
    end = b
    if function(a) == 0:  # one of the endpoints is already a root
        return a
    elif function(b) == 0:
        return b
    elif function(a) * function(b) > 0:
        # Same sign at both ends: no guaranteed root inside the interval.
        raise ValueError("could not find root in given interval.")
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                # Sign change in [start, mid]: shrink from the right.
                end = mid
            else:
                # Sign change in [mid, end]: shrink from the left.
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    """Example cubic x**3 - 2*x - 5, which has a real root near 2.0945515."""
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_000))

    import doctest

    doctest.testmod()
98
def encrypt(input_string: str, key: int) -> str:
    """Encrypt *input_string* with the rail-fence (zigzag) cipher of height *key*.

    Characters are written in a zigzag over ``key`` rails, then the rails are
    read off row by row to form the ciphertext.

    :raises ValueError: if key is zero or negative
    """
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        # One rail, or too short to zigzag: ciphertext equals plaintext.
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1  # index of the bottom rail

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)

    grid = ["".join(row) for row in temp_grid]
    return "".join(grid)


def decrypt(input_string: str, key: int) -> str:
    """Invert :func:`encrypt` for the same *key*.

    Rebuilds the rail shapes with placeholder marks, slices the ciphertext
    into the rails, then reads the zigzag back off.

    :raises ValueError: if key is zero or negative
    """
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    lowest = key - 1  # index of the bottom rail
    temp_grid: list[list[str]] = [[] for _ in range(key)]

    # Generate the template: mark how many characters land on each rail.
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    # Fill each rail with the matching ciphertext slice.
    grid = []
    counter = 0
    for row in temp_grid:
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    # Read the plaintext back in zigzag order, consuming each rail left-to-right.
    output_string = ""
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Decrypt *input_string* with every possible key; map key -> candidate plaintext."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
59
0
"""simple docstring"""

import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    """Download a user's recent tweets and write them to ``new_<name>_tweets.csv``.

    Pages backwards through the timeline 200 tweets at a time using ``max_id``
    until the API returns no more tweets.
    NOTE(review): the API presumably caps retrievable history (~3200 tweets) —
    this function simply stops when a page comes back empty.
    """
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f'getting tweets before {oldest}')
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f'...{len(alltweets)} tweets downloaded so far')

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f'new_{screen_name}_tweets.csv', 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['id', 'created_at', 'text'])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
91
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# mT5 reuses the T5 tokenizers unchanged; fall back to dummies when the
# optional backend is missing so attribute access raises a helpful error.
if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

MTaTokenizer = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

MTaTokenizerFast = TaTokenizerFast

# Map of submodule name -> public symbols, consumed by _LazyModule below.
# NOTE(review): the runtime module keys/symbols ("configuration_mt5", "MT5Config", ...)
# and the TYPE_CHECKING imports (".configuration_mta", MTaConfig, ...) disagree in the
# original file; both are kept as-is — confirm against the actual submodule filenames.
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel

else:
    import sys

    # Install the lazy module so submodules import only on first attribute access.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
        module_spec=__spec__,
    )
59
0
"""simple docstring"""

import argparse
import logging
import sys
from unittest.mock import patch

import run_glue_deebert

from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def __lowerCAmelCase():
    """Parse and return the value of a ``-f`` command-line flag (notebook shim)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f


class A__(TestCasePlus):
    """Slow integration tests for the DeeBERT research example."""

    def setup(self) -> None:
        # Mirror the example script's log output to stdout for easier debugging.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        """Run ``run_glue_deebert.main()`` with *args* as argv and require every
        reported metric to be >= 0.666."""
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, 'run_glue_deebert.py')
            with patch.object(sys, 'argv', args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        # Two-stage training run.
        train_args = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
        self.run_and_check(train_args)

        # Evaluate each highway exit of the trained model.
        eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(eval_args)

        # Evaluate early-exit behaviour with an entropy threshold.
        entropy_eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(entropy_eval_args)
86
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


logger = logging.get_logger(__name__)

# ONNX tensor element-type strings -> matching numpy dtypes.
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    """Thin wrapper around an ``onnxruntime.InferenceSession`` with
    ``save_pretrained`` / ``from_pretrained`` semantics."""

    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        # Directory the session was loaded from; used when re-saving weights.
        self.model_save_dir = kwargs.get("model_save_dir", None)
        # File name of the weights inside model_save_dir.
        self.latest_model_name = kwargs.get("latest_model_name", None)

    def __call__(self, **kwargs):
        """Run inference; keyword args are input tensors, coerced to numpy arrays."""
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        # First argument None -> return all model outputs.
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Create an ``ort.InferenceSession`` for *path*, defaulting to CPU.

        :param path: location of the ONNX model file
        :param provider: onnxruntime execution provider name
        :param sess_options: optional ``ort.SessionOptions``
        """
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        """Copy the model weights (and external-weights file, if any) into
        *save_directory*, optionally renaming to *file_name*."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        """Save the model to *save_directory* (created if missing)."""
        if os.path.isfile(save_directory):
            logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        """Load from a local directory or download from the hub, then wrap
        the resulting session in a new instance."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)

        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        """Public loader; supports the ``repo_id@revision`` shorthand."""
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
59
0
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)


# Map of submodule name -> public symbols, consumed by _LazyModule below.
_import_structure = {
    'configuration_speecht5': [
        'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
        'SpeechT5Config',
        'SpeechT5HifiGanConfig',
    ],
    'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
    'processing_speecht5': ['SpeechT5Processor'],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_speecht5'] = ['SpeechT5Tokenizer']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_speecht5'] = [
        'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SpeechT5ForSpeechToText',
        'SpeechT5ForSpeechToSpeech',
        'SpeechT5ForTextToSpeech',
        'SpeechT5Model',
        'SpeechT5PreTrainedModel',
        'SpeechT5HifiGan',
    ]

if TYPE_CHECKING:
    from .configuration_speechta import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechTaConfig,
        SpeechTaHifiGanConfig,
    )
    from .feature_extraction_speechta import SpeechTaFeatureExtractor
    from .processing_speechta import SpeechTaProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speechta import SpeechTaTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speechta import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechTaForSpeechToSpeech,
            SpeechTaForSpeechToText,
            SpeechTaForTextToSpeech,
            SpeechTaHifiGan,
            SpeechTaModel,
            SpeechTaPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so submodules import only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
1
"""Convert classy-vision / VISSL RegNet checkpoints to the HF `transformers` format.

Fixes over the previous revision:
- the classy_vision import listed the same name twice; restored the three
  distinct model classes actually used below (RegNetY32gf / Y64gf / Y128gf),
- `RegNetParams(... w_a=1744, w_a=620.83 ...)` had a duplicate keyword
  argument (SyntaxError); restored `w_0`/`w_a`,
- class and variable names were mangled so that later references were
  unresolved; restored from their use sites,
- `nn.Convad` / `nn.BatchNormad` typos -> `nn.Conv2d` / `nn.BatchNorm2d`,
- `config` is now defined in the single-model branch before being returned.
"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple

import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs

from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    """Runs one forward pass and records the leaf modules that were executed."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # Keep leaves only (a module with no submodules), plus conv/batchnorm
        # wrappers which may technically contain children.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # Detach all hooks once the trace is captured.
        [h.remove() for h in self.handles]
        return self

    @property
    def parametrized(self):
        # Only modules that actually hold learnable parameters.
        return list(filter(lambda m: len(list(m.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    """Copies weights from `src` to `dest` by zipping their traced leaf modules."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """Transfer weights of `self.src` to `self.dest` using `x` as a probe input.

        Both models must produce their parametrized operations in the same order.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced)} operations while"""
                f""" destination module has {len(dest_traced)}."""
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"""Transfered from={src_m} to={dest_m}""")


class FakeRegNetVisslWrapper(nn.Module):
    """Pretends to be a VISSL trunk so `get_trunk_forward_outputs` can drive a classy-vision RegNet."""

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"""Unexpected layer name {k}"""
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"""res{block_index}""", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )


class NameToFromModelFuncMap(dict):
    """Maps model name -> loader. Unknown names fall back to the equivalent timm model."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val


class NameToOurModelFuncMap(dict):
    """Maps model name -> the `transformers` class to instantiate."""

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        # SEER pretraining checkpoints have no classification head.
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val


def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    """Copy selected head tensors from a VISSL state dict into ours, in place."""
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"""Copied key={from_key} to={to_key}""")
    return to_state_dict


def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    """Convert one checkpoint, verify outputs match, and optionally push to the Hub."""
    print(f"""Converting {name}...""")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        # NOTE(review): mismatches are tolerated because SEER trunks have no head.
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

        if from_state_dict is not None:
            keys = []
            # for seer - in1k finetuned we have to manually copy the head
            if "seer" in name and "in1k" in name:
                keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
            to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
            our_model.load_state_dict(to_state_dict)

        our_outputs = our_model(x, output_hidden_states=True)
        our_output = (
            our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
        )
        from_output = from_model(x)
        from_output = from_output[-1] if type(from_output) is list else from_output

        # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
        if "seer" in name and "in1k" in name:
            our_output = our_outputs.hidden_states[-1]

        assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f"""Pushed {name}""")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert `model_name` (or every known RegNet variant when None)."""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # Configs finetuned on ImageNet share the same label maps.
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "regnet-x-002": ImageNetPreTrainedConfig(
            depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="x"
        ),
        "regnet-x-004": ImageNetPreTrainedConfig(
            depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="x"
        ),
        "regnet-x-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="x"
        ),
        "regnet-x-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="x"
        ),
        "regnet-x-016": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="x"
        ),
        "regnet-x-032": ImageNetPreTrainedConfig(
            depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type="x"
        ),
        "regnet-x-040": ImageNetPreTrainedConfig(
            depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type="x"
        ),
        "regnet-x-064": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type="x"
        ),
        "regnet-x-080": ImageNetPreTrainedConfig(
            depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type="x"
        ),
        "regnet-x-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type="x"
        ),
        "regnet-x-160": ImageNetPreTrainedConfig(
            depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type="x"
        ),
        "regnet-x-320": ImageNetPreTrainedConfig(
            depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type="x"
        ),
        # y variant
        "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8),
        "regnet-y-004": ImageNetPreTrainedConfig(
            depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8
        ),
        "regnet-y-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16
        ),
        "regnet-y-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16
        ),
        "regnet-y-016": ImageNetPreTrainedConfig(
            depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24
        ),
        "regnet-y-032": ImageNetPreTrainedConfig(
            depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24
        ),
        "regnet-y-040": ImageNetPreTrainedConfig(
            depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64
        ),
        "regnet-y-064": ImageNetPreTrainedConfig(
            depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72
        ),
        "regnet-y-080": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56
        ),
        "regnet-y-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112
        ),
        "regnet-y-160": ImageNetPreTrainedConfig(
            depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112
        ),
        "regnet-y-320": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
        ),
        # models created by SEER -> https://arxiv.org/abs/2202.08360
        "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        "regnet-y-1280-seer": RegNetConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
        ),
        "regnet-y-2560-seer": RegNetConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
        ),
        "regnet-y-10b-seer": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
        # finetuned on imagenet
        "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
        ),
        "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328
        ),
        "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
        ),
        "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
        ),
        "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
    }

    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        model.load_state_dict(model_state_dict["trunk"])
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            config,
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
            " currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
59
0
"""Chinese-CLIP processor: bundles an image processor and a BERT tokenizer."""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class _SCREAMING_SNAKE_CASE(ProcessorMixin):
    """
    Wraps a ChineseCLIP image processor and a BERT tokenizer into a single
    processor. Text goes to the tokenizer, images to the image processor; when
    both are given the pixel values are merged into the tokenizer encoding.
    """

    # NOTE: these three class attributes previously all shadowed one mangled
    # name; restored to the names the ProcessorMixin contract reads.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; at least one is required.

        Returns a `BatchEncoding` with token fields, `pixel_values`, or both.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both sub-processors' input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
19
"""Sequence-classification pipeline: tokenizes text, runs the model, post-processes logits."""
import warnings
from typing import Dict

import numpy as np

from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


def sigmoid(_outputs):
    """Element-wise logistic function."""
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    """Numerically stable softmax over the last axis."""
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    # Activation applied to the model's logits during postprocessing.
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    """Text classification pipeline using any `ModelForSequenceClassification`."""

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # `top_k=""` is a sentinel: it distinguishes "not passed" (legacy mode)
        # from an explicit `top_k=None`/int.
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated,  if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        """Classify the text(s) given as inputs; see the class docstring for options."""
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair."
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # Default activation is inferred from the model config when not given.
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
59
0
"""Fast unit tests for the IF inpainting diffusion pipeline."""
import random
import unittest

import torch

from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class UpperCAmelCase(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Exercises IFInpaintingPipeline through the shared pipeline tester mixins."""

    # NOTE: these four attributes previously all assigned to one mangled name
    # (only the last assignment survived); restored to the names the
    # PipelineTesterMixin contract reads.
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # Seeded generator + deterministic random tensors so runs are reproducible.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Requires higher tolerance because fp16 accumulates rounding error.
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
15
from __future__ import annotations __lowerCamelCase = list[list[int]] # assigning initial values to the grid __lowerCamelCase = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution __lowerCamelCase = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def UpperCamelCase ( __lowerCamelCase : Matrix , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ): for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def UpperCamelCase ( __lowerCamelCase : Matrix ): for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def UpperCamelCase ( __lowerCamelCase : Matrix ): if location := find_empty_location(__lowerCamelCase ): snake_case , snake_case : Union[str, Any] = location else: # If the location is ``None``, then the grid is solved. 
return grid for digit in range(1 , 10 ): if is_safe(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): snake_case : List[Any] = digit if sudoku(__lowerCamelCase ) is not None: return grid snake_case : Union[str, Any] = 0 return None def UpperCamelCase ( __lowerCamelCase : Matrix ): for row in grid: for cell in row: print(__lowerCamelCase , end=" " ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("""\nExample grid:\n""" + """=""" * 20) print_solution(example_grid) print("""\nExample grid solution:""") __lowerCamelCase = sudoku(example_grid) if solution is not None: print_solution(solution) else: print("""Cannot find a solution.""")
59
0
"""simple docstring""" import math import unittest def lowerCamelCase ( _UpperCamelCase : int ) -> Union[str, Any]: '''simple docstring''' assert isinstance(__lowerCamelCase , __lowerCamelCase ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__lowerCamelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' self.assertTrue(is_prime(2 ) ) self.assertTrue(is_prime(3 ) ) self.assertTrue(is_prime(5 ) ) self.assertTrue(is_prime(7 ) ) self.assertTrue(is_prime(11 ) ) self.assertTrue(is_prime(13 ) ) self.assertTrue(is_prime(17 ) ) self.assertTrue(is_prime(19 ) ) self.assertTrue(is_prime(23 ) ) self.assertTrue(is_prime(29 ) ) def lowerCamelCase__ ( self : int ): '''simple docstring''' with self.assertRaises(snake_case__ ): is_prime(-19 ) self.assertFalse( is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , ) self.assertFalse( is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , ) self.assertFalse(is_prime(2 * 2 ) ) self.assertFalse(is_prime(2 * 3 ) ) self.assertFalse(is_prime(3 * 3 ) ) self.assertFalse(is_prime(3 * 5 ) ) self.assertFalse(is_prime(3 * 5 * 7 ) ) if __name__ == "__main__": unittest.main()
115
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="""%(message)s""") def UpperCamelCase ( __lowerCamelCase : np.ndarray ): return input_array.reshape((input_array.size, 1) ) def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : int ): snake_case : Any = np.nan for i in range(__lowerCamelCase ): snake_case : List[str] = features[:, labels == i] snake_case : Dict = data.mean(1 ) # Centralize the data of class i snake_case : Optional[Any] = data - column_reshape(__lowerCamelCase ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(__lowerCamelCase , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) snake_case : Optional[Any] = np.dot(__lowerCamelCase , centered_data.T ) return covariance_sum / features.shape[1] def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : int ): snake_case : Optional[Any] = features.mean(1 ) snake_case : Tuple = np.nan for i in range(__lowerCamelCase ): snake_case : Tuple = features[:, labels == i] snake_case : Tuple = data.shape[1] snake_case : List[str] = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase ) , (column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase )).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) snake_case : Optional[int] = device_data * np.dot( column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase ) , (column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase )).T , ) return covariance_sum / features.shape[1] def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : int ): # Check if the features have been loaded if features.any(): snake_case : Tuple = features.mean(1 ) # Center the dataset snake_case : List[str] = features - np.reshape(__lowerCamelCase , (data_mean.size, 1) ) snake_case : Optional[Any] = np.dot(__lowerCamelCase , centered_data.T ) / features.shape[1] snake_case , snake_case : Dict = np.linalg.eigh(__lowerCamelCase ) # Take all the columns in the reverse order (-1), and then takes only the first snake_case : Optional[Any] = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space snake_case : Union[str, Any] = np.dot(filtered_eigenvectors.T , __lowerCamelCase ) logging.info("Principal Component Analysis computed" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=__lowerCamelCase ) logging.error("Dataset empty" ) raise AssertionError def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : int , __lowerCamelCase : int ): assert classes > dimensions # Check if features have been already loaded if features.any: snake_case , snake_case : str = eigh( covariance_between_classes(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , covariance_within_classes(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , ) snake_case : str = eigenvectors[:, ::-1][:, :dimensions] snake_case , snake_case , snake_case : int = np.linalg.svd(__lowerCamelCase ) snake_case : List[Any] = svd_matrix[:, 0:dimensions] snake_case : Optional[Any] = np.dot(filtered_svd_matrix.T , __lowerCamelCase ) logging.info("Linear Discriminant Analysis computed" ) return projected_data else: 
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=__lowerCamelCase ) logging.error("Dataset empty" ) raise AssertionError def UpperCamelCase ( ): # Create dummy dataset with 2 classes and 3 features snake_case : str = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) snake_case : Union[str, Any] = np.array([0, 0, 0, 1, 1] ) snake_case : List[Any] = 2 snake_case : Any = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(__lowerCamelCase ) as error_info: snake_case : str = linear_discriminant_analysis( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if isinstance(__lowerCamelCase , np.ndarray ): raise AssertionError( "Did not raise AssertionError for dimensions > classes" ) assert error_info.type is AssertionError def UpperCamelCase ( ): snake_case : List[str] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) snake_case : List[str] = 2 snake_case : int = np.array([[6.9282_0323, 8.6602_5404, 10.3923_0485], [3.0, 3.0, 3.0]] ) with pytest.raises(__lowerCamelCase ) as error_info: snake_case : Union[str, Any] = principal_component_analysis(__lowerCamelCase , __lowerCamelCase ) if not np.allclose(__lowerCamelCase , __lowerCamelCase ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
59
0