code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' def lowerCamelCase ( ) -> Any: lowercase_ : Dict = 0 for i in range(1 , 1001 ): total += i**i return str(UpperCAmelCase__ )[-10:] if __name__ == "__main__": print(solution())
705
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __magic_name__ : def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=13 , lowercase_ : List[str]=10 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=2 , lowercase_ : Optional[Any]=2 , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : str=4 , lowercase_ : Dict=37 , lowercase_ : Tuple="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=10 , lowercase_ : Tuple=0.02 , lowercase_ : Any="divided_space_time" , lowercase_ : Tuple=None , ): lowercase_ : int = parent lowercase_ : str = batch_size lowercase_ : List[str] = image_size lowercase_ : str = num_channels lowercase_ : List[Any] = patch_size lowercase_ : Optional[Any] = num_frames lowercase_ : Dict = is_training lowercase_ : int = use_labels lowercase_ : List[str] = hidden_size lowercase_ : Dict = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Any = intermediate_size lowercase_ : Optional[int] = hidden_act lowercase_ 
: Optional[Any] = hidden_dropout_prob lowercase_ : List[Any] = attention_probs_dropout_prob lowercase_ : Any = attention_type lowercase_ : Union[str, Any] = initializer_range lowercase_ : List[str] = scope lowercase_ : Optional[int] = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token lowercase_ : Dict = (image_size // patch_size) ** 2 lowercase_ : List[Any] = (num_frames) * self.num_patches_per_frame + 1 def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Optional[Any] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) lowercase_ : int = None if self.use_labels: lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels ) lowercase_ : Optional[Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : int = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) lowercase_ : Any = self.num_labels return config def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[str] ): lowercase_ : Optional[Any] = TimesformerModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : int = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str ): 
lowercase_ : Dict = TimesformerForVideoClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : int = model(lowercase_ ) # verify the logits shape lowercase_ : List[Any] = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : List[str] = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ : int = config_and_inputs lowercase_ : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () UpperCamelCase__ = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = TimesformerModelTester(self ) lowercase_ : Union[str, Any] = ConfigTester( self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Tuple=False ): lowercase_ : List[Any] = copy.deepcopy(lowercase_ ) if return_labels: if model_class in get_values(lowercase_ ): lowercase_ : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_ ) return inputs_dict def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): pass def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: 
lowercase_ : str = model_class(lowercase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase_ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Dict = model_class(lowercase_ ) lowercase_ : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : Union[str, Any] = [*signature.parameters.keys()] lowercase_ : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Any ): for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : Any = TimesformerModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): if not self.has_attentions: pass else: lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : List[str] = True for model_class in self.all_model_classes: lowercase_ : str = self.model_tester.seq_length lowercase_ : int = self.model_tester.num_frames lowercase_ : int = True lowercase_ : Any = False lowercase_ : str = True lowercase_ : int = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : List[str] = outputs.attentions self.assertEqual(len(lowercase_ ) , 
self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase_ : List[str] = True lowercase_ : str = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : int = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) lowercase_ : Optional[Any] = len(lowercase_ ) # Check attention is always last and order is fine lowercase_ : Tuple = True lowercase_ : Dict = True lowercase_ : str = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) self.assertEqual(out_len + 1 , len(lowercase_ ) ) lowercase_ : Optional[Any] = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict ): lowercase_ : List[str] = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : Dict = outputs.hidden_states lowercase_ : List[Any] = self.model_tester.num_hidden_layers + 1 
self.assertEqual(len(lowercase_ ) , lowercase_ ) lowercase_ : List[Any] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : List[str] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : Optional[int] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) def lowerCamelCase ( ) -> Optional[int]: lowercase_ : List[str] = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) lowercase_ : List[Any] = np.load(UpperCAmelCase__ ) return list(UpperCAmelCase__ ) @require_torch @require_vision class __magic_name__ ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE_ ( self : str ): # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to( lowercase_ ) lowercase_ : Optional[Any] = self.default_image_processor lowercase_ : Any = prepare_video() lowercase_ : Optional[int] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase_ ) # forward pass with torch.no_grad(): lowercase_ : Optional[Any] = model(**lowercase_ ) # verify the logits lowercase_ : Any = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , lowercase_ ) lowercase_ : int = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
30
0
'''simple docstring''' import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor _lowercase : List[str] = logging.get_logger(__name__) class __magic_name__ ( _UpperCAmelCase): def __init__( self : int , *lowercase_ : List[Any] , **lowercase_ : Any ): warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_ )
706
'''simple docstring''' from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig _lowercase : Tuple = logging.get_logger(__name__) # General docstring _lowercase : List[str] = "RegNetConfig" # Base docstring _lowercase : Dict = "facebook/regnet-y-040" _lowercase : Union[str, Any] = [1, 1088, 7, 7] # Image classification docstring _lowercase : Optional[Any] = "facebook/regnet-y-040" _lowercase : Union[str, Any] = "tabby, tabby cat" _lowercase : str = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class __magic_name__ ( nn.Module): def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : int = 1 , lowercase_ : Optional[str] = "relu" , ): super().__init__() lowercase_ : List[Any] = nn.Convad( lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , ) lowercase_ : str = nn.BatchNormad(lowercase_ ) lowercase_ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity() def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] ): lowercase_ : Dict = self.convolution(lowercase_ ) lowercase_ : str = self.normalization(lowercase_ ) lowercase_ : Optional[Any] = self.activation(lowercase_ ) return hidden_state class __magic_name__ ( nn.Module): def __init__( self : List[Any] , lowercase_ : RegNetConfig ): 
super().__init__() lowercase_ : str = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) lowercase_ : Any = config.num_channels def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] ): lowercase_ : List[str] = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" ) lowercase_ : Any = self.embedder(lowercase_ ) return hidden_state class __magic_name__ ( nn.Module): def __init__( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ): super().__init__() lowercase_ : Optional[Any] = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ ) lowercase_ : Union[str, Any] = nn.BatchNormad(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Tensor ): lowercase_ : Tuple = self.convolution(lowercase_ ) lowercase_ : str = self.normalization(lowercase_ ) return hidden_state class __magic_name__ ( nn.Module): def __init__( self : str , lowercase_ : int , lowercase_ : int ): super().__init__() lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) ) lowercase_ : int = nn.Sequential( nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any ): # b c h w -> b c 1 1 lowercase_ : List[str] = self.pooler(lowercase_ ) lowercase_ : Optional[int] = self.attention(lowercase_ ) lowercase_ : Any = hidden_state * attention return hidden_state class __magic_name__ ( nn.Module): def __init__( self : Optional[int] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ): super().__init__() lowercase_ : List[Any] = in_channels != out_channels or stride != 1 lowercase_ : Optional[int] = max(1 , out_channels // config.groups_width ) 
lowercase_ : Dict = ( RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity() ) lowercase_ : List[Any] = nn.Sequential( RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , ) lowercase_ : int = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any ): lowercase_ : Any = hidden_state lowercase_ : Union[str, Any] = self.layer(lowercase_ ) lowercase_ : Union[str, Any] = self.shortcut(lowercase_ ) hidden_state += residual lowercase_ : str = self.activation(lowercase_ ) return hidden_state class __magic_name__ ( nn.Module): def __init__( self : Optional[Any] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ): super().__init__() lowercase_ : str = in_channels != out_channels or stride != 1 lowercase_ : int = max(1 , out_channels // config.groups_width ) lowercase_ : int = ( RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity() ) lowercase_ : Union[str, Any] = nn.Sequential( RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , ) lowercase_ : Optional[int] = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ): lowercase_ : Optional[int] = hidden_state lowercase_ : str = self.layer(lowercase_ ) lowercase_ : int = self.shortcut(lowercase_ ) hidden_state += residual lowercase_ : Optional[Any] = 
self.activation(lowercase_ ) return hidden_state class __magic_name__ ( nn.Module): def __init__( self : str , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ): super().__init__() lowercase_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer lowercase_ : str = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ): lowercase_ : Tuple = self.layers(lowercase_ ) return hidden_state class __magic_name__ ( nn.Module): def __init__( self : Dict , lowercase_ : RegNetConfig ): super().__init__() lowercase_ : Optional[Any] = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) lowercase_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ): self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ): lowercase_ : Tuple = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowercase_ : Union[str, Any] = hidden_states + (hidden_state,) lowercase_ : Dict = stage_module(lowercase_ ) if output_hidden_states: lowercase_ : Optional[Any] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return 
BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ ) class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = RegNetConfig UpperCamelCase__ = '''regnet''' UpperCamelCase__ = '''pixel_values''' UpperCamelCase__ = True def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] ): if isinstance(lowercase_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" ) elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any=False ): if isinstance(lowercase_ , lowercase_ ): lowercase_ : List[str] = value _lowercase : Dict = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" _lowercase : Any = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( '''The bare RegNet model outputting raw features without any specific head on top.''', _UpperCAmelCase, ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class __magic_name__ ( _UpperCAmelCase): def __init__( self : Any , lowercase_ : Any ): super().__init__(lowercase_ ) lowercase_ : List[str] = config lowercase_ : Union[str, Any] = RegNetEmbeddings(lowercase_ ) lowercase_ : Union[str, Any] = RegNetEncoder(lowercase_ ) lowercase_ : str = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ): lowercase_ : List[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ : str = self.embedder(lowercase_ ) lowercase_ : Optional[Any] = self.encoder( lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ ) lowercase_ : List[Any] = encoder_outputs[0] lowercase_ : str = self.pooler(lowercase_ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' RegNet Model with an image classification head on top (a linear layer on top of the pooled 
features), e.g. for ImageNet. ''', _UpperCAmelCase, ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class __magic_name__ ( _UpperCAmelCase): def __init__( self : Dict , lowercase_ : str ): super().__init__(lowercase_ ) lowercase_ : Any = config.num_labels lowercase_ : List[str] = RegNetModel(lowercase_ ) # classification head lowercase_ : Any = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ): lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ : Optional[int] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ ) lowercase_ : Optional[int] = outputs.pooler_output if return_dict else outputs[1] lowercase_ : List[Any] = self.classifier(lowercase_ ) lowercase_ : Optional[int] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowercase_ : Optional[int] = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowercase_ : str = """single_label_classification""" else: lowercase_ : str = """multi_label_classification""" if self.config.problem_type == "regression": lowercase_ : str = MSELoss() if self.num_labels == 1: lowercase_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowercase_ : List[str] = 
loss_fct(lowercase_ , lowercase_ ) elif self.config.problem_type == "single_label_classification": lowercase_ : Optional[int] = CrossEntropyLoss() lowercase_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowercase_ : Dict = BCEWithLogitsLoss() lowercase_ : Tuple = loss_fct(lowercase_ , lowercase_ ) if not return_dict: lowercase_ : Tuple = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
30
0
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str = " " ) -> list: lowercase_ : List[Any] = [] lowercase_ : int = 0 for index, char in enumerate(UpperCAmelCase__ ): if char == separator: split_words.append(string[last_index:index] ) lowercase_ : List[str] = index + 1 elif index + 1 == len(UpperCAmelCase__ ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
707
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : int = [ "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FocalNetForImageClassification", "FocalNetForMaskedImageModeling", "FocalNetBackbone", "FocalNetModel", "FocalNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
'''simple docstring''' import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() _lowercase : Tuple = [ "word_embeddings_layernorm.weight", "word_embeddings_layernorm.bias", "input_layernorm.weight", "input_layernorm.bias", "post_attention_layernorm.weight", "post_attention_layernorm.bias", "self_attention.dense.bias", "mlp.dense_4h_to_h.bias", "ln_f.weight", "ln_f.bias", ] _lowercase : str = [ "mlp.dense_4h_to_h.weight", "self_attention.dense.weight", ] def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] ) -> str: lowercase_ : List[str] = { """word_embeddings.weight""": """word_embeddings.weight""", """word_embeddings.norm.weight""": """word_embeddings_layernorm.weight""", """word_embeddings.norm.bias""": """word_embeddings_layernorm.bias""", """weight""": """ln_f.weight""", """bias""": """ln_f.bias""", } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks lowercase_ : Dict = int(re.match(R""".*layer_(\d*).*""" , UpperCAmelCase__ )[1] ) layer_number -= 3 return F'''h.{layer_number}.''' + key def lowerCamelCase ( UpperCAmelCase__ : Any ) -> Union[str, Any]: if dtype == torch.bool: return 1 / 8 lowercase_ : Tuple = re.search(R"""[^\d](\d+)$""" , str(UpperCAmelCase__ ) ) if bit_search is None: raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''' ) lowercase_ : Optional[int] = int(bit_search.groups()[0] ) return bit_size // 8 def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) -> Optional[Any]: # Construct model if bloom_config_file == "": lowercase_ : Union[str, Any] = BloomConfig() else: lowercase_ : int = BloomConfig.from_json_file(UpperCAmelCase__ ) if shard_model: 
lowercase_ : Optional[Any] = os.listdir(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = sorted(filter(lambda UpperCAmelCase__ : s.startswith("""layer""" ) and "model_00" in s , UpperCAmelCase__ ) ) lowercase_ : Union[str, Any] = {"""weight_map""": {}, """metadata""": {}} lowercase_ : Optional[int] = 0 lowercase_ : int = None lowercase_ : str = BloomConfig() for j, file in enumerate(UpperCAmelCase__ ): print("""Processing file: {}""".format(UpperCAmelCase__ ) ) lowercase_ : Any = None for i in range(UpperCAmelCase__ ): # load all TP files lowercase_ : Any = file.replace("""model_00""" , F'''model_0{i}''' ) lowercase_ : Any = torch.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , map_location="""cpu""" ) # Rename keys in the transformers names lowercase_ : int = list(temp.keys() ) for key in keys: lowercase_ : Any = temp.pop(UpperCAmelCase__ ) if tensors is None: lowercase_ : str = temp else: for key in tensors.keys(): if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel lowercase_ : Union[str, Any] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks lowercase_ : Any = torch.cat([tensors[key], temp[key]] , dim=UpperCAmelCase__ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): lowercase_ : Dict = tensors[key] / pretraining_tp torch.save( UpperCAmelCase__ , os.path.join( UpperCAmelCase__ , """pytorch_model_{}-of-{}.bin""".format(str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase__ ) ).zfill(5 ) ) , ) , ) for key in tensors.keys(): lowercase_ : 
Optional[int] = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: lowercase_ : Optional[int] = """pytorch_model_{}-of-{}.bin""".format( str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase__ ) ).zfill(5 ) ) lowercase_ : Dict = BloomConfig() lowercase_ : int = pytorch_dump_folder_path + """/""" + CONFIG_NAME lowercase_ : List[Any] = total_size with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) with open(os.path.join(UpperCAmelCase__ , WEIGHTS_NAME + """.index.json""" ) , """w""" , encoding="""utf-8""" ) as f: lowercase_ : List[Any] = json.dumps(UpperCAmelCase__ , indent=2 , sort_keys=UpperCAmelCase__ ) + """\n""" f.write(UpperCAmelCase__ ) else: lowercase_ : Optional[int] = BloomModel(UpperCAmelCase__ ) lowercase_ : int = os.listdir(UpperCAmelCase__ ) lowercase_ : Optional[int] = sorted(filter(lambda UpperCAmelCase__ : s.startswith("""layer""" ) and "model_00" in s , UpperCAmelCase__ ) ) lowercase_ : Union[str, Any] = None for i, file in enumerate(UpperCAmelCase__ ): lowercase_ : Tuple = None for i in range(UpperCAmelCase__ ): # load all TP files lowercase_ : List[str] = file.replace("""model_00""" , F'''model_0{i}''' ) lowercase_ : List[Any] = torch.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , map_location="""cpu""" ) # Rename keys in the transformers names lowercase_ : int = list(temp.keys() ) for key in keys: lowercase_ : int = temp.pop(UpperCAmelCase__ ) if tensors is None: lowercase_ : Dict = temp else: for key in tensors.keys(): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel lowercase_ : Dict = 1 if any(text in 
key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks lowercase_ : Optional[Any] = torch.cat([tensors[key], temp[key]] , dim=UpperCAmelCase__ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): lowercase_ : int = tensors[key] / pretraining_tp lowercase_ : Optional[int] = model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ ) assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected''' if missing_keys is None: lowercase_ : Union[str, Any] = set(other_keys.missing_keys ) else: lowercase_ : str = missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, F'''The keys {missing_keys} are missing''' # Save pytorch-model os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ ) lowercase_ : Union[str, Any] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME lowercase_ : Any = pytorch_dump_folder_path + """/""" + CONFIG_NAME print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' ) if config.torch_dtype is not None: lowercase_ : Union[str, Any] = model.to(config.torch_dtype ) torch.save(model.state_dict() , UpperCAmelCase__ ) print(F'''Save configuration file to {pytorch_config_dump_path}''' ) with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _lowercase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--bloom_checkpoint_path", default=None, type=str, required=True, help="Path to the Megatron-LM checkpoint path.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." 
) parser.add_argument( "--bloom_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--shard_model", action="store_true", help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint", ) parser.add_argument( "--pretraining_tp", default=4, type=int, help="Pretraining TP rank that has been used when training the model in Megatron-LM \n", ) _lowercase : List[str] = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
708
'''simple docstring'''
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def lowerCamelCase(dataset_size, input_in_memory_max_size, monkeypatch):
    """Check ``is_small_dataset`` against the configurable ``IN_MEMORY_MAX_SIZE`` cap.

    Fix: the three parameters previously shared a single name (a SyntaxError) and the
    body referenced an undefined ``monkeypatch``. Parameter names now match the
    ``parametrize`` decorators, so pytest can inject them plus the ``monkeypatch`` fixture.

    NOTE(review): pytest only collects functions named ``test_*``; confirm this is
    invoked explicitly or restore its original ``test_*`` name.
    """
    if input_in_memory_max_size != "default":
        # Patch the module-level config so is_small_dataset sees the overridden cap.
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    # A dataset is "small" only when both sizes are truthy and it fits under the cap.
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
30
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Maps submodule name -> list of public names it exports; consumed by _LazyModule below.
# Fix: these entries were previously bound to a throwaway name while _LazyModule was
# handed an undefined `_import_structure`, raising NameError at import time.
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch objects are only exposed when torch is installed.
    # NOTE(review): submodule key inferred from the "configuration_longt5" key above —
    # confirm against the actual sibling file name.
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): key inferred as above — confirm file name.
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longta import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongTaEncoderModel,
            LongTaForConditionalGeneration,
            LongTaModel,
            LongTaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longta import (
            FlaxLongTaForConditionalGeneration,
            FlaxLongTaModel,
            FlaxLongTaPreTrainedModel,
        )

else:
    import sys

    # Fix: the lazy module must replace this module in sys.modules; the previous code
    # assigned the _LazyModule to a plain variable, so lazy attribute lookup never engaged.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
709
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" ) lowercase_ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" ) lowercase_ : Union[str, Any] = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids lowercase_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids lowercase_ : Optional[Any] = model(lowercase_ , labels=lowercase_ ).loss lowercase_ : Optional[int] = -tf.math.reduce_mean(lowercase_ ).numpy() lowercase_ : Optional[int] = -21.22_81_68 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
30
0
'''simple docstring'''
import argparse
import os
import re


# Root of the package whose __init__.py files are checked/sorted.
# Fix: this constant and every regex below were previously all assigned to one
# throwaway name, leaving `_re_indent`, `_re_direct_key`, etc. undefined at use.
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the leading whitespace of `line` ("" for an empty/blank line)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks delimited by lines at exactly `indent_level`.

    If `start_prompt` is given, everything before the first line starting with it
    becomes the first block; if `end_prompt` is given, everything from the first
    line starting with it becomes the last block.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the code).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            # A line at `indent_level` closes the current block when that block's last
            # line is more deeply indented (i.e. the line is the block's closing bracket).
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a key function so sorting ignores casing and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort object names: constants first, then classes, then functions; each group alphabetically."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return `import_statement` with the names inside its bracketed list(s) sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` section of one __init__.py; return True if it would change."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0.
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks.
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend.
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                reorderded_blocks.append(sort_objects_in_import(internal_blocks[sorted_indices[count]]))
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every __init__.py under PATH_TO_TRANSFORMERS; raise on failures in check mode."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                # Accumulate every failing file (the previous code overwrote the list,
                # reporting at most one file in the plural message below).
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
710
'''simple docstring'''
from collections.abc import Callable

import numpy as np


def lowerCamelCase(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    """Integrate y' = ode_func(x, y) from x0 to x_end with the modified-Euler (Heun) method.

    Fix: the five parameters previously shared a single name (a SyntaxError) and the
    body referenced `x_end`, `step_size` and `x` that were never bound; names are
    restored from those call sites.

    :param ode_func: right-hand side f(x, y) of the ODE
    :param y0: initial value y(x0)
    :param x0: initial abscissa
    :param step_size: integration step h (> 0)
    :param x_end: final abscissa (> x0)
    :return: array of the n+1 successive y values, y[0] == y0
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor: one explicit Euler step.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: trapezoidal average of the slopes at both ends of the step.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
30
0
'''simple docstring'''
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMvaImageProcessor


class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Holds the image-processor kwargs and sizes shared by the tests below.

    Fix: this class previously bore the same name as the test class underneath
    (and was therefore shadowed) while ``setUp`` instantiated it under the name
    ``LayoutLMvaImageProcessingTester`` — it is renamed to match that call site.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        # Keyword arguments accepted by LayoutLMvaImageProcessor.
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class __magic_name__(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for LayoutLMvaImageProcessor.

    Fixes: the mixin base was an undefined name (the import above provides
    ``ImageProcessingSavingTestMixin``); the class attribute and the ``setUp``
    assignment were bound to throwaway names although every test reads
    ``self.image_processing_class`` / ``self.image_processor_tester``; and all
    methods shared a single name, shadowing each other — names are restored
    from their call sites and unittest conventions (``setUp``, ``test_*``).
    """

    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        # Must be an instance attribute: every test below reads self.image_processor_tester.
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        # NOTE(review): boolean flags below were lost in obfuscation; values assumed
        # from the standard shared-test helper usage — confirm against the mixin.
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]]  # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
711
"""
Second-order (biquad) IIR audio-filter factories.

Coefficient formulas follow Robert Bristow-Johnson's "Audio EQ Cookbook".
Every factory returns a ready-to-use 2nd-order ``IIRFilter`` configured for
``frequency`` (Hz) at ``samplerate`` (Hz).  ``q_factor`` defaults to
1/sqrt(2), i.e. a maximally-flat (Butterworth) response; the peak and shelf
filters additionally take ``gain_db``, the boost/cut in decibels.
"""
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(
    frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Create a low-pass biquad filter."""
    w0 = tau * frequency / samplerate  # normalized angular frequency
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos  # b2 == b0 for the low-pass prototype

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(
    frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Create a high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos  # == -(1 + cos w0); b2 == b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(
    frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Create a band-pass biquad filter (constant skirt gain, peak gain = Q)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(
    frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Create an all-pass biquad filter (flat magnitude, frequency-dependent phase)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    # For an all-pass the denominator is the mirrored numerator.
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a peaking-EQ biquad filter boosting/cutting by ``gain_db``."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from dB (sqrt of power gain)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a low-shelf biquad filter boosting/cutting lows by ``gain_db``."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Shared sub-expressions of the cookbook shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)

    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a high-shelf biquad filter boosting/cutting highs by ``gain_db``."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)

    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
30
0
'''simple docstring''' import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness _lowercase : Dict = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n" _lowercase : Dict = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n" _lowercase : Optional[int] = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. 
Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n" _lowercase : List[Any] = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". 
Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n" _lowercase : Any = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE." @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class __magic_name__ ( datasets.Metric): def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Value("""string""" ), } ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : Optional[int]=[1, 10, 100] , lowercase_ : List[str]=4 , lowercase_ : Any=3.0 ): if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""" ) with ThreadPoolExecutor(max_workers=lowercase_ ) as executor: lowercase_ : Optional[Any] = [] lowercase_ : List[str] = Counter() lowercase_ : Union[str, Any] = 0 lowercase_ : List[str] = defaultdict(lowercase_ ) for task_id, (candidates, test_case) in enumerate(zip(lowercase_ , lowercase_ ) ): for candidate in candidates: lowercase_ : Any = candidate + """\n""" + test_case lowercase_ : Dict = (test_program, timeout, task_id, completion_id[task_id]) lowercase_ : Union[str, Any] = executor.submit(lowercase_ , *lowercase_ ) futures.append(lowercase_ ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(lowercase_ ): lowercase_ : Optional[Any] = future.result() results[result["task_id"]].append((result["""completion_id"""], result) ) lowercase_ : int = [], [] for result in results.values(): result.sort() lowercase_ : str = [r[1]["""passed"""] for r in result] total.append(len(lowercase_ ) ) correct.append(sum(lowercase_ ) ) lowercase_ : List[str] = np.array(lowercase_ ) lowercase_ : List[Any] = np.array(lowercase_ ) lowercase_ : Optional[int] = k lowercase_ : List[Any] = {f'''pass@{k}''': estimate_pass_at_k(lowercase_ , lowercase_ , lowercase_ 
).mean() for k in ks if (total >= k).all()} return pass_at_k, results def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str ) -> int: def estimator(UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ : Any = itertools.repeat(UpperCAmelCase__ , len(UpperCAmelCase__ ) ) else: assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = iter(UpperCAmelCase__ ) return np.array([estimator(int(UpperCAmelCase__ ) , int(UpperCAmelCase__ ) , UpperCAmelCase__ ) for n, c in zip(UpperCAmelCase__ , UpperCAmelCase__ )] )
712
'''simple docstring''' import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int _lowercase : str = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class __magic_name__ ( datasets.BuilderConfig): UpperCamelCase__ = None def lowerCamelCase ( UpperCAmelCase__ : "pyspark.sql.DataFrame" , UpperCAmelCase__ : List[int] , ) -> str: import pyspark def generate_fn(): lowercase_ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) ) for partition_id in partition_order: lowercase_ : int = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" ) lowercase_ : Any = partition_df.collect() lowercase_ : Dict = 0 for row in rows: yield F'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class __magic_name__ ( _BaseExamplesIterable): def __init__( self : int , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : Optional[int]=None , ): lowercase_ : Dict = df lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() ) lowercase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : List[Any] ): yield from self.generate_examples_fn() def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.random.Generator ): lowercase_ : str = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(lowercase_ ) return SparkExamplesIterable(self.df , partition_order=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , 
lowercase_ : int , lowercase_ : int ): lowercase_ : str = self.split_shard_indices_by_worker(lowercase_ , lowercase_ ) return SparkExamplesIterable(self.df , partition_order=lowercase_ ) @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): return len(self.partition_order ) class __magic_name__ ( datasets.DatasetBuilder): UpperCamelCase__ = SparkConfig def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ): import pyspark lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate() lowercase_ : Optional[int] = df lowercase_ : List[str] = working_dir super().__init__( cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , ) def SCREAMING_SNAKE_CASE_ ( self : str ): # Returns the path of the created file. def create_cache_and_write_probe(lowercase_ : str ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=lowercase_ ) lowercase_ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(lowercase_ , """a""" ) return [probe_file] if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: lowercase_ : str = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ): import pyspark def get_arrow_batch_size(lowercase_ : Any ): for batch in it: yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} ) lowercase_ : Union[str, Any] = self.df.count() lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. lowercase_ : Any = ( self.df.limit(lowercase_ ) .repartition(1 ) .mapInArrow(lowercase_ , """batch_bytes: long""" ) .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) ) .collect()[0] .sample_bytes / sample_num_rows ) lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. 
lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) ) lowercase_ : Any = self.df.repartition(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ): import pyspark lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath lowercase_ : Optional[Any] = file_format == """parquet""" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. lowercase_ : Tuple = self.config.features lowercase_ : Any = self._writer_batch_size lowercase_ : List[str] = self._fs.storage_options def write_arrow(lowercase_ : str ): # Within the same SparkContext, no two task attempts will share the same attempt ID. lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId() lowercase_ : Dict = next(lowercase_ , lowercase_ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) lowercase_ : int = 0 lowercase_ : List[Any] = writer_class( features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , ) lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] ) writer.write_table(lowercase_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: lowercase_ , lowercase_ : Dict = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) shard_id += 1 lowercase_ : Any = writer_class( features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , ) lowercase_ : List[str] = pa.Table.from_batches([batch] ) writer.write_table(lowercase_ ) if writer._num_bytes > 0: lowercase_ , lowercase_ : str = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(lowercase_ ) ): lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) ) shutil.move(lowercase_ , lowercase_ ) lowercase_ : Union[str, Any] = ( self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" ) .groupBy("""task_id""" ) .agg( pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" 
).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ): self._validate_cache_dir() lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(lowercase_ ) lowercase_ : Tuple = not is_remote_filesystem(self._fs ) lowercase_ : int = os.path.join if is_local else posixpath.join lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN""" lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ ) lowercase_ : Any = 0 lowercase_ : Tuple = 0 lowercase_ : int = 0 lowercase_ : Dict = [] lowercase_ : Union[str, Any] = [] for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ): ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Union[str, Any] = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(lowercase_ ) lowercase_ : List[str] = total_num_examples lowercase_ : int = total_num_bytes # should rename everything at the end logger.debug(f'''Renaming {total_shards} shards.''' ) if total_shards > 1: lowercase_ : Tuple = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. 
lowercase_ : Dict = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( lowercase_ : int , lowercase_ : int , lowercase_ : int , ): rename( lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , ) lowercase_ : Union[str, Any] = [] lowercase_ : Tuple = 0 for i in range(len(lowercase_ ) ): lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i] for shard_id in range(lowercase_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect() else: # don't use any pattern lowercase_ : List[str] = 0 lowercase_ : Optional[Any] = task_id_and_num_shards[0][0] self._rename( fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ): return SparkExamplesIterable(self.df )
30
0
'''simple docstring''' import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class __magic_name__ : def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : int ): return None class __magic_name__ : def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] ): return None class __magic_name__ ( unittest.TestCase): UpperCamelCase__ = [ # (model_name, model_kwargs) ('''bert-base-cased''', {}), ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(lowercase_ , """tf""" , 12 , **lowercase_ ) @require_torch @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(lowercase_ , """pt""" , 12 , **lowercase_ ) @require_torch @slow def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): from transformers import BertModel lowercase_ : str = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""] with NamedTemporaryFile(mode="""w+t""" ) as vocab_file: vocab_file.write("""\n""".join(lowercase_ ) ) vocab_file.flush() lowercase_ : str = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowercase_ : Any = BertModel(BertConfig(vocab_size=len(lowercase_ ) ) ) model.save_pretrained(lowercase_ ) self._test_export(lowercase_ , """pt""" , 12 , lowercase_ ) @require_tf @slow def 
SCREAMING_SNAKE_CASE_ ( self : List[Any] ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Optional[int] = self._test_export(lowercase_ , """tf""" , 12 , **lowercase_ ) lowercase_ : Dict = quantize(Path(lowercase_ ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(lowercase_ ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) @require_torch @slow def SCREAMING_SNAKE_CASE_ ( self : Any ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Union[str, Any] = self._test_export(lowercase_ , """pt""" , 12 , **lowercase_ ) lowercase_ : List[Any] = quantize(lowercase_ ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(lowercase_ ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str]=None , **lowercase_ : Tuple ): try: # Compute path with TemporaryDirectory() as tempdir: lowercase_ : List[str] = Path(lowercase_ ).joinpath("""model.onnx""" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ) return path except Exception as e: self.fail(lowercase_ ) @require_torch @require_tokenizers @slow def SCREAMING_SNAKE_CASE_ ( self : int ): from transformers import BertModel lowercase_ : List[str] = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) lowercase_ : Any = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(lowercase_ , lowercase_ , """pt""" ) @require_tf @require_tokenizers @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): from transformers import TFBertModel lowercase_ : Any = 
TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) lowercase_ : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(lowercase_ , lowercase_ , """tf""" ) def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] ): lowercase_ : Optional[int] = FeatureExtractionPipeline(lowercase_ , lowercase_ ) lowercase_ : List[Any] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""] lowercase_ : Any = infer_shapes(lowercase_ , lowercase_ ) # Assert all variables are present self.assertEqual(len(lowercase_ ) , len(lowercase_ ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , lowercase_ ) self.assertSequenceEqual(variable_names[3:] , lowercase_ ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} ) self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = ["""input_ids""", """attention_mask""", """token_type_ids"""] lowercase_ : List[Any] = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]} lowercase_ : Optional[Any] = ensure_valid_input(FuncContiguousArgs() , lowercase_ , lowercase_ ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(lowercase_ ) , 3 ) # Should have exactly the same input names self.assertEqual(set(lowercase_ ) , set(lowercase_ ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(lowercase_ 
, (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowercase_ : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , lowercase_ , lowercase_ ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(lowercase_ ) , 1 ) self.assertEqual(len(lowercase_ ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens["""input_ids"""] ) self.assertEqual(ordered_input_names[0] , """input_ids""" ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" ) self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
713
"""Import structure for the Bloom model, resolved lazily on first attribute access."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Mapping of submodule name -> public names it exports; consumed by _LazyModule.
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

# Tokenizer objects are only importable when `tokenizers` is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

# Model classes require PyTorch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
'''simple docstring''' import unittest import numpy as np def lowerCamelCase ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray | None = None , ): lowercase_ : List[Any] = np.shape(UpperCAmelCase__ ) lowercase_ : Dict = np.shape(UpperCAmelCase__ ) lowercase_ : int = np.shape(UpperCAmelCase__ ) if shape_a[0] != shape_b[0]: lowercase_ : Optional[int] = ( """Expected the same number of rows for A and B. """ F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(UpperCAmelCase__ ) if shape_b[1] != shape_c[1]: lowercase_ : Optional[Any] = ( """Expected the same number of columns for B and C. """ F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(UpperCAmelCase__ ) lowercase_ : Any = pseudo_inv if a_inv is None: try: lowercase_ : List[str] = np.linalg.inv(UpperCAmelCase__ ) except np.linalg.LinAlgError: raise ValueError( """Input matrix A is not invertible. 
Cannot compute Schur complement.""" ) return mat_c - mat_b.T @ a_inv @ mat_b class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Dict = np.array([[2, 1], [6, 3]] ) lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ ) lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] ) lowercase_ : Optional[int] = np.linalg.det(lowercase_ ) lowercase_ : int = np.linalg.det(lowercase_ ) lowercase_ : int = np.linalg.det(lowercase_ ) self.assertAlmostEqual(lowercase_ , det_a * det_s ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(lowercase_ ): schur_complement(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(lowercase_ ): schur_complement(lowercase_ , lowercase_ , lowercase_ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
714
"""Interactive Vigenère cipher: encrypt/decrypt a message with a letter key."""

LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    """Prompt for a message, key and mode, then print the translated text."""
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    """Encrypt ``message`` with the Vigenère ``key``."""
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    """Decrypt ``message`` with the Vigenère ``key``."""
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    """Shift each letter of ``message`` by the matching key letter.

    Case is preserved and non-alphabetic characters are passed through
    unchanged; the key index only advances on letters.
    """
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            # Wrap around to the start of the key.
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
30
0
"""Output heads that map network features to ``torch.distributions`` objects.

Provides an affine-transformed distribution wrapper, projection layers from
hidden features to distribution parameters, and ``DistributionOutput`` heads
for Student's t, normal and negative-binomial likelihoods.
"""
from typing import Callable, Dict, Optional, Tuple

import torch
from torch import nn
from torch.distributions import (
    AffineTransform,
    Distribution,
    Independent,
    NegativeBinomial,
    Normal,
    StudentT,
    TransformedDistribution,
)


class AffineTransformed(TransformedDistribution):
    """Wraps a base distribution with the affine map ``x -> loc + scale * x``."""

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation (square root of the variance)."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    """Projects hidden features to one unconstrained tensor per parameter."""

    def __init__(
        self,
        in_features: int,
        args_dim: Dict[str, int],
        domain_map: Callable[..., Tuple[torch.Tensor]],
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # One linear head per distribution parameter.
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        # Map the raw projections into each parameter's valid domain.
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    """Wraps an arbitrary callable as an ``nn.Module``."""

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    """Base class describing how to build a distribution from projected args."""

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        # Scale every parameter's size by the output dimension.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        # dim > 1 treats the last axis as a single multivariate event.
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        """Build the output distribution, optionally affine-rescaled."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value guaranteed to lie in the distribution's support."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping features to parameters."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """Map raw projections to valid parameter values; set by subclasses."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # Smooth positivity mapping: (x + sqrt(x^2 + 4)) / 2.
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    """Student's t output head."""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)  # keep df > 2 so the variance exists
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    """Gaussian output head."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    """Negative-binomial output head for count data."""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
715
"""Unit tests for ``BarkProcessor`` (tokenizer wrapping + voice presets)."""
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class __magic_name__(unittest.TestCase):
    def setUp(self):
        # Shared fixtures; tmpdirname is removed in tearDown.
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        """Load the checkpoint tokenizer, forwarding any extra kwargs."""
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        file_path = os.path.join(self.tmpdirname, "file.npz")
        np.savez(file_path, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=file_path)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        # NOTE(review): kwarg values reconstructed from the upstream Bark test
        # (obfuscation erased them) — confirm against transformers' test suite.
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
30
0
"""Lazy import structure for the X-MOD model (standard transformers pattern)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> public names it exposes; filled in conditionally.
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
716
"""Task template for image classification datasets."""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class __magic_name__(TaskTemplate):
    """Maps dataset columns to the image-classification schema."""

    # `task` is serialized even when it equals the default so consumers can
    # always identify the template kind.
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the
        dataset's own ``ClassLabel`` feature.

        Raises:
            ValueError: if the label column is missing or not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Bypass the frozen dataclass to swap in the aligned schema.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Dataset column -> canonical task column."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
30
0
"""Lazy import structure for the speech-encoder-decoder model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Map of submodule name -> public names it exposes; filled in conditionally.
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
717
"""Download class-regularization images from LAION via clip-retrieval."""
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    """Query the LAION KNN service for `class_prompt` and save up to
    `num_class_images` images (plus caption/url/path manifests) under
    `class_data_dir`.

    Skips the download entirely when enough images are already present.
    """
    factor = 1.5
    # Over-request so enough downloads survive broken URLs / bad images.
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            # Not enough hits: widen the request and retry.
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fa, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fa:
        # NOTE(review): the three manifest handles collapsed to one name in
        # the obfuscated source; restored to distinct handles below.
        pass

    with open(f"{class_data_dir}/caption.txt", "w") as f_caption, open(f"{class_data_dir}/urls.txt", "w") as f_urls, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f_paths:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Decoding validates that the payload is a real image.
                    Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f_caption.write(images["caption"] + "\n")
                    f_urls.write(images["url"] + "\n")
                    f_paths.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip any broken URL or image.
                continue
    return


def parse_args():
    """Parse the CLI arguments for the retrieval script."""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
30
0
"""Unit tests for ``BarkProcessor`` (tokenizer wrapping + voice presets)."""
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class __magic_name__(unittest.TestCase):
    def setUp(self):
        # Shared fixtures; tmpdirname is removed in tearDown.
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        """Load the checkpoint tokenizer, forwarding any extra kwargs."""
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        file_path = os.path.join(self.tmpdirname, "file.npz")
        np.savez(file_path, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=file_path)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        # NOTE(review): kwarg values reconstructed from the upstream Bark test
        # (obfuscation erased them) — confirm against transformers' test suite.
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
718
"""In-place slowsort — a deliberately inefficient multiply-and-surrender sort."""
from __future__ import annotations


def lowerCamelCase(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start:end + 1]`` in place using slowsort.

    Args:
        sequence: mutable list to sort in place.
        start: first index of the range (defaults to 0).
        end: last index of the range (defaults to ``len(sequence) - 1``).
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    # Recursively sort both halves, then float the maximum to `end`.
    lowerCamelCase(sequence, start, mid)
    lowerCamelCase(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    # Sort everything but the (now correct) final element.
    lowerCamelCase(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
30
0
"""Lazy import structure for the SwiftFormer model."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Map of submodule name -> public names it exposes; filled in conditionally.
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
719
"""Run Stable Diffusion on CPU with Intel Extension for PyTorch (IPEX)."""
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex: trace the UNet with a representative sample input when
# supported, otherwise fall back to optimization without one.
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
30
0
"""Download class-regularization images from LAION via clip-retrieval."""
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    """Query the LAION KNN service for `class_prompt` and save up to
    `num_class_images` images (plus caption/url/path manifests) under
    `class_data_dir`.

    Skips the download entirely when enough images are already present.
    """
    factor = 1.5
    # Over-request so enough downloads survive broken URLs / bad images.
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            # Not enough hits: widen the request and retry.
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f_caption, open(f"{class_data_dir}/urls.txt", "w") as f_urls, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f_paths:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Decoding validates that the payload is a real image.
                    Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f_caption.write(images["caption"] + "\n")
                    f_urls.write(images["url"] + "\n")
                    f_paths.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip any broken URL or image.
                continue
    return


def parse_args():
    """Parse the CLI arguments for the retrieval script."""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
720
"""Lazy import structure for the SwiftFormer model."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Map of submodule name -> public names it exposes; filled in conditionally.
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase : Optional[Any] = logging.get_logger(__name__) _lowercase : Optional[Any] = { "google/vivit-b-16x2-kinetics400": ( "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json" ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''vivit''' def __init__( self : List[str] , lowercase_ : Optional[Any]=224 , lowercase_ : List[str]=32 , lowercase_ : Any=[2, 16, 16] , lowercase_ : List[str]=3 , lowercase_ : List[Any]=768 , lowercase_ : str=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Tuple=3072 , lowercase_ : Tuple="gelu_fast" , lowercase_ : List[str]=0.0 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Optional[int]=0.02 , lowercase_ : int=1E-06 , lowercase_ : Union[str, Any]=True , **lowercase_ : Optional[int] , ): lowercase_ : Tuple = hidden_size lowercase_ : Any = num_hidden_layers lowercase_ : Any = num_attention_heads lowercase_ : int = intermediate_size lowercase_ : Optional[Any] = hidden_act lowercase_ : Optional[Any] = hidden_dropout_prob lowercase_ : Optional[Any] = attention_probs_dropout_prob lowercase_ : Dict = initializer_range lowercase_ : str = layer_norm_eps lowercase_ : Union[str, Any] = image_size lowercase_ : Tuple = num_frames lowercase_ : Tuple = tubelet_size lowercase_ : Union[str, Any] = num_channels lowercase_ : str = qkv_bias super().__init__(**lowercase_ )
721
'''simple docstring''' import unittest import numpy as np def lowerCamelCase ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray | None = None , ) -> np.ndarray: lowercase_ : List[Any] = np.shape(UpperCAmelCase__ ) lowercase_ : Dict = np.shape(UpperCAmelCase__ ) lowercase_ : int = np.shape(UpperCAmelCase__ ) if shape_a[0] != shape_b[0]: lowercase_ : Optional[int] = ( """Expected the same number of rows for A and B. """ F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(UpperCAmelCase__ ) if shape_b[1] != shape_c[1]: lowercase_ : Optional[Any] = ( """Expected the same number of columns for B and C. """ F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(UpperCAmelCase__ ) lowercase_ : Any = pseudo_inv if a_inv is None: try: lowercase_ : List[str] = np.linalg.inv(UpperCAmelCase__ ) except np.linalg.LinAlgError: raise ValueError( """Input matrix A is not invertible. 
Cannot compute Schur complement.""" ) return mat_c - mat_b.T @ a_inv @ mat_b class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Dict = np.array([[2, 1], [6, 3]] ) lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ ) lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] ) lowercase_ : Optional[int] = np.linalg.det(lowercase_ ) lowercase_ : int = np.linalg.det(lowercase_ ) lowercase_ : int = np.linalg.det(lowercase_ ) self.assertAlmostEqual(lowercase_ , det_a * det_s ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(lowercase_ ): schur_complement(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(lowercase_ ): schur_complement(lowercase_ , lowercase_ , lowercase_ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
30
0
'''simple docstring''' def lowerCamelCase ( UpperCAmelCase__ : list[int] , UpperCAmelCase__ : list[int] ) -> tuple[float, float]: # Check if the input is valid if not len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) == 3: raise ValueError("""Please enter a valid equation.""" ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError("""Both a & b of two equations can't be zero.""" ) # Extract the coefficients lowercase_ : Any = equationa lowercase_ : Optional[int] = equationa # Calculate the determinants of the matrices lowercase_ : List[Any] = aa * ba - aa * ba lowercase_ : str = ca * ba - ca * ba lowercase_ : str = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError("""Infinite solutions. (Consistent system)""" ) else: raise ValueError("""No solution. (Inconsistent system)""" ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: lowercase_ : List[str] = determinant_x / determinant lowercase_ : Union[str, Any] = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
700
'''simple docstring''' _lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" def lowerCamelCase ( UpperCAmelCase__ : bytes ) -> bytes: # Make sure the supplied data is a bytes-like object if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ : Union[str, Any] = F'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(UpperCAmelCase__ ) lowercase_ : Dict = """""".join(bin(UpperCAmelCase__ )[2:].zfill(8 ) for byte in data ) lowercase_ : Union[str, Any] = len(UpperCAmelCase__ ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase_ : List[Any] = b"""=""" * ((6 - len(UpperCAmelCase__ ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(UpperCAmelCase__ ) % 6) else: lowercase_ : Union[str, Any] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(UpperCAmelCase__ ) , 6 ) ).encode() + padding ) def lowerCamelCase ( UpperCAmelCase__ : str ) -> bytes: # Make sure encoded_data is either a string or a bytes-like object if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ : List[str] = ( """argument should be a bytes-like object or ASCII string, """ F'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(UpperCAmelCase__ ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): try: lowercase_ : Optional[int] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) lowercase_ : Any = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 
characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(UpperCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase_ : Optional[int] = encoded_data[:-padding] lowercase_ : Any = """""".join( bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase_ : int = """""".join( bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data ) lowercase_ : Optional[int] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(UpperCAmelCase__ ) , 8 ) ] return bytes(UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
30
0
'''simple docstring''' from ... import PretrainedConfig _lowercase : Union[str, Any] = { "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json", } class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP UpperCamelCase__ = '''nezha''' def __init__( self : Dict , lowercase_ : Tuple=21128 , lowercase_ : Dict=768 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : str=3072 , lowercase_ : Dict="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Dict=512 , lowercase_ : str=64 , lowercase_ : Optional[int]=2 , lowercase_ : List[Any]=0.02 , lowercase_ : Optional[int]=1E-12 , lowercase_ : Tuple=0.1 , lowercase_ : List[str]=0 , lowercase_ : Optional[Any]=2 , lowercase_ : str=3 , lowercase_ : Optional[int]=True , **lowercase_ : Dict , ): super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ ) lowercase_ : Dict = vocab_size lowercase_ : int = hidden_size lowercase_ : List[str] = num_hidden_layers lowercase_ : int = num_attention_heads lowercase_ : List[Any] = hidden_act lowercase_ : List[str] = intermediate_size lowercase_ : Optional[int] = hidden_dropout_prob lowercase_ : int = attention_probs_dropout_prob lowercase_ : Union[str, Any] = max_position_embeddings lowercase_ : Tuple = max_relative_position lowercase_ : List[str] = type_vocab_size lowercase_ : List[str] = initializer_range lowercase_ : List[str] = layer_norm_eps lowercase_ : Union[str, Any] = classifier_dropout lowercase_ : List[Any] = use_cache
701
'''simple docstring''' import argparse _lowercase : Optional[int] = "docs/source/_static/js/custom.js" def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Dict: with open(UpperCAmelCase__ , encoding="""utf-8""" , newline="""\n""" ) as f: lowercase_ : Optional[int] = f.readlines() lowercase_ : Tuple = 0 # First let's put the right version while not lines[index].startswith("""const stableVersion =""" ): index += 1 lowercase_ : Optional[Any] = F'''const stableVersion = "v{version}"\n''' # Then update the dictionary while not lines[index].startswith("""const versionMapping = {""" ): index += 1 # We go until the end while not lines[index].startswith("""}""" ): index += 1 # We add the new version at the end lines[index - 1] += F''' "v{version}": "v{version}",\n''' with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(UpperCAmelCase__ ) if __name__ == "__main__": _lowercase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--version", help="Release version.") _lowercase : Dict = parser.parse_args() update_custom_js(args.version)
30
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) _lowercase : str = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys _lowercase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
702
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __magic_name__ : def __init__( self : Tuple , lowercase_ : Tuple , lowercase_ : Any=3 , lowercase_ : int=32 , lowercase_ : str=3 , lowercase_ : int=10 , lowercase_ : Any=[8, 16, 32, 64] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Any=True , lowercase_ : int=True , lowercase_ : Any="relu" , lowercase_ : List[Any]=3 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : List[str]=1 , ): lowercase_ : Any = parent lowercase_ : str = batch_size lowercase_ : Any = image_size lowercase_ : Optional[Any] = num_channels lowercase_ : Any = embeddings_size lowercase_ : Union[str, Any] = hidden_sizes lowercase_ : Any = depths lowercase_ : Dict = is_training lowercase_ : Tuple = use_labels lowercase_ : str = hidden_act lowercase_ : Optional[Any] = num_labels lowercase_ : Tuple = scope lowercase_ : Any = len(lowercase_ ) lowercase_ : Optional[Any] = out_features lowercase_ : Tuple = out_indices lowercase_ : str = num_groups def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ : List[Any] = None 
if self.use_labels: lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels ) lowercase_ : int = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] ): lowercase_ : Optional[int] = BitModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : List[Any] = model(lowercase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] ): lowercase_ : Union[str, Any] = self.num_labels lowercase_ : Tuple = BitForImageClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Any = model(lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] ): lowercase_ : Any = BitBackbone(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Dict = model(lowercase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone 
works with out_features=None lowercase_ : List[str] = None lowercase_ : Dict = BitBackbone(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Tuple = model(lowercase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Optional[int] = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs lowercase_ : Any = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () UpperCamelCase__ = ( {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification} if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : int = BitModelTester(self ) lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE_ ( self : List[str] ): return @unittest.skip(reason="""Bit does not output attentions""" ) def 
SCREAMING_SNAKE_CASE_ ( self : Any ): pass @unittest.skip(reason="""Bit does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE_ ( self : Any ): pass @unittest.skip(reason="""Bit does not support input and output embeddings""" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): pass def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Optional[Any] = model_class(lowercase_ ) lowercase_ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : Union[str, Any] = [*signature.parameters.keys()] lowercase_ : Optional[int] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : List[Any] = model_class(config=lowercase_ ) for name, module in model.named_modules(): if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ): lowercase_ : Optional[Any] = model_class(lowercase_ ) model.to(lowercase_ ) 
model.eval() with torch.no_grad(): lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase_ : Optional[int] = self.model_tester.num_stages self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : Dict = ["""preactivation""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: lowercase_ : Union[str, Any] = layer_type lowercase_ : Optional[Any] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : Union[str, Any] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) @unittest.skip(reason="""Bit does not use feedforward chunking""" ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): pass def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def lowerCamelCase ( ) -> Optional[Any]: lowercase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): return ( 
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ ) lowercase_ : int = self.default_image_processor lowercase_ : List[Any] = prepare_img() lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ ) # forward pass with torch.no_grad(): lowercase_ : str = model(**lowercase_ ) # verify the logits lowercase_ : Optional[int] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowercase_ ) lowercase_ : Union[str, Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) ) @require_torch class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = (BitBackbone,) if is_torch_available() else () UpperCamelCase__ = BitConfig UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Union[str, Any] = BitModelTester(self )
30
0
"""Emulation of the WWII German Enigma machine.

Per symbol the signal path is: plugboard -> rotor 1 -> rotor 2 -> rotor 3 ->
reflector -> rotor 3 -> rotor 2 -> rotor 1 -> plugboard.  Every stage is an
involution, so calling ``enigma`` a second time with identical settings
decrypts the message.
"""

RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet -------------------------- from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector -------------------------- symmetric pairs A<->N, B<->O, ... M<->Z
reflector = {
    "A": "N", "N": "A", "B": "O", "O": "B", "C": "P", "P": "C",
    "D": "Q", "Q": "D", "E": "R", "R": "E", "F": "S", "S": "F",
    "G": "T", "T": "G", "H": "U", "U": "H", "I": "V", "V": "I",
    "J": "W", "W": "J", "K": "X", "X": "K", "L": "Y", "Y": "L",
    "M": "Z", "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"


def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Validate rotor selection/positions and build the plugboard mapping.

    Raises:
        Exception: fewer than 3 unique rotors supplied.
        ValueError: a rotor position is outside 1..26.
    """
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})")

    # Checks if rotor positions are valid (1-based, inclusive of 26)
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        raise ValueError(f"First rotor position is not within range of 1..26 ({rotorpos1})")
    if not 0 < rotorpos2 <= len(abc):
        raise ValueError(f"Second rotor position is not within range of 1..26 ({rotorpos2})")
    if not 0 < rotorpos3 <= len(abc):
        raise ValueError(f"Third rotor position is not within range of 1..26 ({rotorpos3})")

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    """Build a symmetric substitution dict from letter pairs, e.g. "ABCD" -> A<->B, C<->D.

    Raises:
        TypeError: setting is not a string.
        Exception: odd number of symbols, symbol outside the alphabet, or duplicate symbol.
    """
    if not isinstance(pbstring, str):
        raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})")

    # Spaces may separate the pairs; drop them before validating.
    # (bug fix: the result of str.replace was previously discarded, so any
    # space in the setting made validation fail)
    pbstring = pbstring.replace(" ", "")

    if len(pbstring) % 2 != 0:
        raise Exception(f"Odd number of symbols ({len(pbstring)})")
    if pbstring == "":
        return {}

    # Checks if all characters are unique
    tmppbl = set()
    for symbol in pbstring:
        if symbol not in abc:
            raise Exception(f"'{symbol}' not in list of symbols")
        if symbol in tmppbl:
            raise Exception(f"Duplicate symbol ({symbol})")
        tmppbl.add(symbol)
    del tmppbl

    # Created the dictionary (both directions, so the mapping is symmetric)
    pb: dict[str, str] = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encrypt (or, symmetrically, decrypt) ``text`` with the given machine settings.

    Non-alphabet symbols pass through unchanged and do not step the rotors.
    """
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotora, rotorb, rotorc = rotor_selection
    # positions are supplied 1-based; work 0-based internally
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # forward pass through the three rotors
            index = abc.index(symbol) + rotorpos1
            symbol = rotora[index % len(abc)]
            index = abc.index(symbol) + rotorpos2
            symbol = rotorb[index % len(abc)]
            index = abc.index(symbol) + rotorpos3
            symbol = rotorc[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # backward pass through the rotors (reverse order)
            symbol = abc[rotorc.index(symbol) - rotorpos3]
            symbol = abc[rotorb.index(symbol) - rotorpos2]
            symbol = abc[rotora.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # odometer-style rotor stepping
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
703
"""Tests for the seq2seq example ROUGE helpers (`calculate_rouge`, `calculate_rouge_path`).

Reconstructed naming: the module previously bound every constant to the same
mangled name and referenced undefined placeholders, so nothing was runnable.
"""
from collections import defaultdict
from pathlib import Path

import pandas as pd

from rouge_cli import calculate_rouge_path
from utils import calculate_rouge


# Model predictions (summaries) paired index-for-index with TGT references.
PRED = [
    "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
    " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
    " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
    "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
    " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
    " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
    " body.",
    "Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
    " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
    " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
    " punishment.",
]

TGT = [
    "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
    " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
    " had informed his Lufthansa training school of an episode of severe depression, airline says .",
    "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
    " Israel and the United States opposed the move, which could open the door to war crimes investigations against"
    " Israelis .",
    "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
    " death . Organization claims that governments around the world are using the threat of terrorism to advance"
    " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
    " sentences up by 28% .",
]


def test_disaggregated_scores_are_determinstic():
    # Without bootstrap aggregation we get per-example scores in a defaultdict;
    # the rouge2 scores must not depend on which other keys were requested.
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    # rougeLsum is sentence-aware: separating sentences with newlines should help.
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    # rouge1/2/L ignore sentence boundaries entirely.
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    # Pegasus emits "<n>" sentence markers; the newline-aware path must score higher.
    pred = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    tgt = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    # End-to-end through the CLI helper, with and without bootstrap aggregation.
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
30
0
"""Convert a native T5X (JAX/Flax) checkpoint into a PyTorch UMT5/MT5 checkpoint.

Reconstructed naming: every function here was previously defined under one
shadowing mangled name while call sites referenced undefined identifiers,
and the argparse dest (`t5x_checkpoint_path`) did not match the attribute read
at the bottom of the file.
"""
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the relative position bias parameters of layer `i`. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the (K, O, Q, V) attention kernels of layer `i`, each flattened to 2-D. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP kernels (wi, wo) of layer `i`; wi is a pair for gated (v1.1) MLPs. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer-norm scale of layer `i`."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameter tree from T5X-Flax naming to Transformers-PyTorch naming."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        # classic T5: a single shared relative-attention bias on block 0
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = (
                    t5x_relpos_bias_lookup(old, i, "decoder").T
                )

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model, filling in tied/missing weights."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in `model` with the converted T5X params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path,
    config_file,
    pytorch_dump_path,
    is_encoder_only: bool = False,
    scalable_attention: bool = False,
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
704
"""Speech2Text model configuration.

Reconstructed naming: the previous `__init__` declared every parameter under
the same mangled name (duplicate parameter names are a SyntaxError) and
dropped the `self.` prefixes on the attribute assignments.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    """Configuration class for a Speech2Text encoder-decoder model.

    Defaults reproduce a small LibriSpeech ASR configuration
    (facebook/s2t-small-librispeech-asr).
    """

    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
30
0
"""Integration test for DiT: document image classification on RVL-CDIP (16 classes).

Fix: the test method previously had a mangled, non-`test`-prefixed name, so
unittest discovery never ran it, and the target device variable was undefined.
"""
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        # local import keeps `datasets` optional for the rest of the suite
        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # RVL-CDIP has 16 document classes
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.41_58, -0.40_92, -0.43_47],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1E-4))
705
"""Testing suite for the PyTorch TimeSformer model.

Reconstructed naming: every method previously shared one mangled name (later
definitions silently shadowed earlier ones, so unittest discovered nothing),
class bases were undefined placeholders, and `self.` prefixes were stripped
from the tester's `__init__`.
"""
import copy
import inspect
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        TimesformerForVideoClassification,
        TimesformerModel,
    )
    from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from transformers import VideoMAEImageProcessor


class TimesformerModelTester:
    """Builds tiny TimeSformer configs/inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline tests for TimesformerModel / TimesformerForVideoClassification."""

    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)


def prepare_video():
    """Download a short demo video (as a stacked .npy) and return it as a list of frames."""
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits (Kinetics-400 has 400 classes)
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.30_16, -0.77_13, -0.42_05]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
30
0
"""Ideal gas law helpers: PV = nRT."""

# Universal gas constant. Unit - J mol^-1 K^-1
UNIVERSAL_GAS_CONSTANT: float = 8.314462

# Backward-compatible alias for the previous (mangled) module constant name.
_lowercase: float = UNIVERSAL_GAS_CONSTANT


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure P = nRT / V of an ideal gas system.

    :param moles: amount of gas (mol), must be non-negative
    :param kelvin: absolute temperature (K), must be non-negative
    :param volume: volume (m^3), must be strictly positive
    :raises ValueError: on a negative input or a zero volume
    """
    # volume must be > 0, not merely >= 0: a zero divisor would otherwise
    # surface as a ZeroDivisionError instead of the intended ValueError.
    if moles < 0 or kelvin < 0 or volume <= 0:
        raise ValueError("""Invalid inputs. Enter positive value.""")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume V = nRT / P of an ideal gas system.

    :param moles: amount of gas (mol), must be non-negative
    :param kelvin: absolute temperature (K), must be non-negative
    :param pressure: pressure (Pa), must be strictly positive
    :raises ValueError: on a negative input or a zero pressure
    """
    if moles < 0 or kelvin < 0 or pressure <= 0:
        raise ValueError("""Invalid inputs. Enter positive value.""")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


# Backward-compatible alias: in the previous revision both functions were
# (mis)named ``lowerCamelCase`` and the second definition shadowed the first,
# so the name resolved to the volume computation.
lowerCamelCase = volume_of_gas_system

if __name__ == "__main__":
    from doctest import testmod

    testmod()
706
"""PyTorch RegNet model.

NOTE(review): class names below were reconstructed from the surviving internal
references — the obfuscated source bound every class to ``__magic_name__``,
which made all cross-class references (``RegNetConvLayer`` etc.) NameErrors.
"""
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class RegNetConvLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> activation, the basic RegNet building block."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        # "same" padding for odd kernel sizes; bias is folded into the BN.
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    """RegNet stem: a single 3x3 stride-2 convolution over the input pixels."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation layer: channel-wise attention from pooled features."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state


class RegNetXLayer(nn.Module):
    """RegNet X layer: a residual bottleneck (1x1 -> grouped 3x3 -> 1x1)."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # no activation on the last conv: it is applied after the residual add
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    """RegNet Y layer: an X layer with a Squeeze-and-Excitation block."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    """A RegNet stage, composed of stacked X or Y layers."""

    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config,
                in_channels,
                out_channels,
                stride=stride,
            ),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    """Stacks the configured RegNet stages and optionally collects hidden states."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        # hidden_states collects the input of every stage plus the final output
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # NOTE(review): the module type guarded here was mangled in the source;
        # RegNetModel matches the ResNet template this file is copied from —
        # confirm against upstream.
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value


REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # infer the problem type once from the labels dtype / label count
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
30
0
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


# Make the shared test helpers under <repo>/utils (test_module.*) importable.
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402


# Path to a small fixture config used by the from_pretrained tests below.
# NOTE(review): the `List[Any]` annotation is unimported and the original
# constant name was lost to mangling — the methods below reference it only
# via the unbound name `lowercase_`; restore before running.
_lowercase : List[Any] = get_tests_dir("fixtures/dummy-config.json")


class __magic_name__ ( unittest.TestCase):
    """Unit tests for the AutoConfig factory (from_pretrained / for_model / register).

    NOTE(review): this file is machine-mangled. Every test method carries the
    same name (``SCREAMING_SNAKE_CASE_``), so each definition shadows the
    previous one and only the last survives on the class; in addition, most
    ``lowercase_`` references are read before assignment and would raise
    NameError at runtime. Comments below record the evident intent of each
    method so the identifiers can be restored.
    """

    def SCREAMING_SNAKE_CASE_( self : Any ):
        # setUp-style initialisation of a counter (unused within this view).
        lowercase_ : Optional[int] = 0

    def SCREAMING_SNAKE_CASE_( self : Any ):
        # Sanity check: the auto module is importable and has a module spec.
        self.assertIsNotNone(transformers.models.auto.__spec__ )
        self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )

    def SCREAMING_SNAKE_CASE_( self : Optional[int] ):
        # Loading a bert checkpoint id should yield a config instance
        # (presumably asserted against BertConfig before mangling).
        lowercase_ : Any = AutoConfig.from_pretrained("""bert-base-uncased""" )
        self.assertIsInstance(lowercase_ , lowercase_ )

    def SCREAMING_SNAKE_CASE_( self : Union[str, Any] ):
        # Load the local fixture config file (path constant above).
        lowercase_ : List[Any] = AutoConfig.from_pretrained(lowercase_ )
        self.assertIsInstance(lowercase_ , lowercase_ )

    def SCREAMING_SNAKE_CASE_( self : str ):
        # Load via DUMMY_UNKNOWN_IDENTIFIER — pattern matching resolves the
        # architecture from the name (presumably RobertaConfig; confirm).
        lowercase_ : List[Any] = AutoConfig.from_pretrained(lowercase_ )
        self.assertIsInstance(lowercase_ , lowercase_ )

    def SCREAMING_SNAKE_CASE_( self : List[str] ):
        # for_model builds a default config for a known model type.
        lowercase_ : str = AutoConfig.for_model("""roberta""" )
        self.assertIsInstance(lowercase_ , lowercase_ )

    def SCREAMING_SNAKE_CASE_( self : List[str] ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            lowercase_ : Union[str, Any] = os.path.join(lowercase_ , """fake-roberta""" )
            os.makedirs(lowercase_ , exist_ok=lowercase_ )
            with open(os.path.join(lowercase_ , """config.json""" ) , """w""" ) as f:
                f.write(json.dumps({} ) )
            lowercase_ : Any = AutoConfig.from_pretrained(lowercase_ )
            self.assertEqual(type(lowercase_ ) , lowercase_ )

    def SCREAMING_SNAKE_CASE_( self : Union[str, Any] ):
        # Register a custom config type, check the error paths, then verify a
        # save/reload round-trip through the auto-API.
        try:
            AutoConfig.register("""custom""" , lowercase_ )
            # Wrong model type will raise an error
            with self.assertRaises(lowercase_ ):
                AutoConfig.register("""model""" , lowercase_ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowercase_ ):
                AutoConfig.register("""bert""" , lowercase_ )
            # Now that the config is registered, it can be used as any other config with the auto-API
            lowercase_ : List[Any] = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                # NOTE(review): `config` is unbound here — the CustomConfig()
                # result above was presumably named `config` before mangling.
                config.save_pretrained(lowercase_ )
                lowercase_ : int = AutoConfig.from_pretrained(lowercase_ )
                self.assertIsInstance(lowercase_ , lowercase_ )
        finally:
            # Always undo the registration so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def SCREAMING_SNAKE_CASE_( self : Any ):
        # Non-existent repo id: the error message should name the identifier.
        with self.assertRaisesRegex(
            lowercase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
            lowercase_ : Optional[int] = AutoConfig.from_pretrained("""bert-base""" )

    def SCREAMING_SNAKE_CASE_( self : Optional[int] ):
        # Invalid git revision for an existing repo.
        with self.assertRaisesRegex(
            lowercase_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            lowercase_ : Optional[Any] = AutoConfig.from_pretrained(lowercase_ , revision="""aaaaaa""" )

    def SCREAMING_SNAKE_CASE_( self : Optional[int] ):
        # Repo exists but contains no config.json.
        with self.assertRaisesRegex(
            lowercase_ ,
            """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" ,
        ):
            lowercase_ : Tuple = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )

    def SCREAMING_SNAKE_CASE_( self : Optional[Any] ):
        # Behaviour of trust_remote_code for a Hub config with custom code.
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(lowercase_ ):
            lowercase_ : str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowercase_ ):
            lowercase_ : Union[str, Any] = AutoConfig.from_pretrained(
                """hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowercase_ )

        lowercase_ : Dict = AutoConfig.from_pretrained(
            """hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowercase_ )
        # NOTE(review): `config` / `reloaded_config` below are unbound —
        # they were presumably the names of the two results above.
        self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(lowercase_ )
            lowercase_ : Tuple = AutoConfig.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
        self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )

    def SCREAMING_SNAKE_CASE_( self : Union[str, Any] ):
        # Local registration takes precedence over remote code unless
        # trust_remote_code explicitly enables the Hub version.
        class __magic_name__ ( _UpperCAmelCase):
            UpperCamelCase__ = '''new-model'''

        try:
            AutoConfig.register("""new-model""" , lowercase_ )
            # If remote code is not set, the default is to use local
            lowercase_ : Dict = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
            self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
            # If remote code is disabled, we load the local one.
            lowercase_ : Dict = AutoConfig.from_pretrained(
                """hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowercase_ )
            self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
            # If remote is enabled, we load from the Hub
            lowercase_ : Optional[Any] = AutoConfig.from_pretrained(
                """hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowercase_ )
            self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
        finally:
            # Clean up the registration for subsequent tests.
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
707
'''simple docstring'''

# Lazy-import scaffold for the FocalNet model package: the heavy torch-backed
# modules are only imported on first attribute access via ``_LazyModule``.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> public names it exports.
# NOTE(review): this is referenced below as ``_import_structure`` but is
# assigned to ``_lowercase`` here (mangled identifier), so the _LazyModule
# call at the bottom would raise NameError as written; the ``List[Any]``
# annotation is also unimported. Restore the original name to fix both.
_lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    # The modeling code requires torch; only advertise it when available.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : int = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers and IDEs see the real (eager) imports.
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module object with a lazy proxy.
    _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
"""
A small convolutional neural network implemented with NumPy only: one
convolution layer, one average/max pooling layer and a two-layer fully
connected BP network, trained by plain gradient descent.

NOTE(review): reconstructed from a machine-mangled source in which the class
name and every attribute/parameter name had been collapsed (duplicate
``lowercase_`` parameters even made ``__init__`` a SyntaxError). Names were
recovered from the pickled model-dictionary keys and the surviving call
sites (``self.num_bp1`` .. ``self.thre_bp3``, ``CNN(...)`` in ``ReadModel``).
"""
import pickle

import numpy as np


class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [kernel_size, kernel_number, conv_step] of the convolution layer
        :param size_p1: pooling window size
        :param bp_num1: unit number of the flattened input layer
        :param bp_num2: unit number of the hidden layer
        :param bp_num3: unit number of the output layer
        :param rate_w: learning rate for the weights
        :param rate_t: learning rate for the thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]  # [kernel size, number of kernels]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        # Random init: weights in (-0.5, 0.5], thresholds in (-1, 1].
        # np.asmatrix is used instead of the removed-in-NumPy-2 np.mat alias.
        self.w_conv1 = [
            np.asmatrix(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.asmatrix(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.asmatrix(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            """num_bp1""": self.num_bp1,
            """num_bp2""": self.num_bp2,
            """num_bp3""": self.num_bp3,
            """conv1""": self.conv1,
            """step_conv1""": self.step_conv1,
            """size_pooling1""": self.size_pooling1,
            """rate_weight""": self.rate_weight,
            """rate_thre""": self.rate_thre,
            """w_conv1""": self.w_conv1,
            """wkj""": self.wkj,
            """vji""": self.vji,
            """thre_conv1""": self.thre_conv1,
            """thre_bp2""": self.thre_bp2,
            """thre_bp3""": self.thre_bp3,
        }
        with open(save_path, """wb""") as f:
            pickle.dump(model_dic, f)

        print(f'''Model saved: {save_path}''')

    @classmethod
    def ReadModel(cls, model_path):
        """Rebuild a CNN instance from a file written by :meth:`save_model`.

        WARNING: pickle.load on an untrusted file can execute arbitrary code;
        only load model files you created yourself.
        """
        # read saved model
        with open(model_path, """rb""") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("""conv1""")
        conv_get.append(model_dic.get("""step_conv1"""))
        size_p1 = model_dic.get("""size_pooling1""")
        bp1 = model_dic.get("""num_bp1""")
        bp2 = model_dic.get("""num_bp2""")
        bp3 = model_dic.get("""num_bp3""")
        r_w = model_dic.get("""rate_weight""")
        r_t = model_dic.get("""rate_thre""")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("""w_conv1""")
        conv_ins.wkj = model_dic.get("""wkj""")
        conv_ins.vji = model_dic.get("""vji""")
        conv_ins.thre_conv1 = model_dic.get("""thre_conv1""")
        conv_ins.thre_bp2 = model_dic.get("""thre_bp2""")
        conv_ins.thre_bp3 = model_dic.get("""thre_bp3""")
        return conv_ins

    def sig(self, x):
        # sigmoid activation
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        # round a prediction to 3 decimals
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[i_focus : i_focus + size_conv, j_focus : j_focus + size_conv]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map])) - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)

        # expanding the data slice to one dimension
        # (the mangled source called the non-existent self.Expand_Mat here;
        # _expand_mat is the method actually defined below)
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        """Up-sample the pooled gradient back to feature-map size and apply the
        sigmoid derivative of the convolution output."""
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[i_pool]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=True):
        # model training (upstream source named this ``trian`` — typo fixed)
        print("""----------------------Start Training-------------------------""")
        print((""" - - Shape: Train_Data """, np.shape(datas_train)))
        print((""" - - Shape: Teach_Data """, np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f'''-------------Learning Time {rp}--------------''')
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv] - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            # Lazy import so the module (and training with draw_e=False) does
            # not require matplotlib to be installed.
            from matplotlib import pyplot as plt

            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, """+-""")
            plt.plot(yplot, """r--""")
            plt.xlabel("""Learning Times""")
            plt.ylabel("""All_mse""")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("""------------------Training Complished---------------------""")
        print((""" - - Training epoch: """, rp, f''' - - Mse: {mse:.6f}'''))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        # model predict
        produce_out = []
        print("""-------------------Start Testing-------------------------""")
        print((""" - - Shape: Test_Data """, np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)

        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
708
'''simple docstring'''
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def lowerCamelCase ( monkeypatch , dataset_size , input_in_memory_max_size ) -> None:
    """Check ``is_small_dataset`` against the configured ``IN_MEMORY_MAX_SIZE``.

    Bug fix: the parameters had been renamed to opaque placeholders while the
    body still referenced ``monkeypatch``/``dataset_size``/``input_in_memory_max_size``,
    so every invocation raised ``NameError``.  The real names are restored;
    ``monkeypatch`` in particular must keep its name so pytest injects the
    built-in fixture.

    A dataset is "small" only when both a size and a positive in-memory limit
    exist and the size is strictly below the limit.
    """
    if input_in_memory_max_size != "default":
        # Override the module-level config knob for this test only.
        monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        # The library default is 0, i.e. "never keep datasets in memory".
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        # No size or a zero limit -> never considered small.
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
30
0
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


_lowercase : str = logging.get_logger(__name__)


class __magic_name__(DeformableDetrImageProcessor):
    """Deprecated alias for :class:`DeformableDetrImageProcessor`.

    Kept only for backward compatibility; emits a ``FutureWarning`` on
    construction and otherwise behaves exactly like the image processor.
    """

    def __init__(self, *args, **kwargs):
        # Bug fix: both catch-all parameters were named ``lowercase_`` — a
        # duplicate-argument SyntaxError — and the warning category argument
        # had been mangled to that same (now undefined) name.  Restored to
        # the conventional *args/**kwargs and FutureWarning.
        warnings.warn(
            """The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use DeformableDetrImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
709
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" ) lowercase_ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" ) lowercase_ : Union[str, Any] = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids lowercase_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids lowercase_ : Optional[Any] = model(lowercase_ , labels=lowercase_ ).loss lowercase_ : Optional[int] = -tf.math.reduce_mean(lowercase_ ).numpy() lowercase_ : Optional[int] = -21.22_81_68 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
30
0
'''simple docstring''' import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures _lowercase : Tuple = logging.get_logger(__name__) @dataclass class __magic_name__ : UpperCamelCase__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys())}) UpperCamelCase__ = field( metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''}) UpperCamelCase__ = field( default=128, metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) }, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''}) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[Any] = self.task_name.lower() class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''train''' UpperCamelCase__ = '''dev''' UpperCamelCase__ = '''test''' class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 def __init__( self : Union[str, Any] , lowercase_ : GlueDataTrainingArguments , lowercase_ : PreTrainedTokenizerBase , lowercase_ : Optional[int] = None , lowercase_ : Union[str, Split] = Split.train , lowercase_ : Optional[str] = None , ): warnings.warn( """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """ """library. 
You can have a look at this example script for pointers: """ """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , lowercase_ , ) lowercase_ : Optional[Any] = args lowercase_ : Union[str, Any] = glue_processors[args.task_name]() lowercase_ : Dict = glue_output_modes[args.task_name] if isinstance(lowercase_ , lowercase_ ): try: lowercase_ : str = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""" ) # Load data features from cache or dataset file lowercase_ : Optional[Any] = os.path.join( cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , ) lowercase_ : int = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) lowercase_ : Tuple = label_list[2], label_list[1] lowercase_ : Union[str, Any] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
lowercase_ : Union[str, Any] = cached_features_file + """.lock""" with FileLock(lowercase_ ): if os.path.exists(lowercase_ ) and not args.overwrite_cache: lowercase_ : Optional[Any] = time.time() lowercase_ : str = torch.load(lowercase_ ) logger.info( f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start ) else: logger.info(f'''Creating features from dataset file at {args.data_dir}''' ) if mode == Split.dev: lowercase_ : Optional[Any] = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: lowercase_ : Optional[int] = self.processor.get_test_examples(args.data_dir ) else: lowercase_ : Dict = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: lowercase_ : Optional[int] = examples[:limit_length] lowercase_ : Optional[int] = glue_convert_examples_to_features( lowercase_ , lowercase_ , max_length=args.max_seq_length , label_list=lowercase_ , output_mode=self.output_mode , ) lowercase_ : Any = time.time() torch.save(self.features , lowercase_ ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self : Tuple ): return len(self.features ) def __getitem__( self : Optional[int] , lowercase_ : Dict ): return self.features[i] def SCREAMING_SNAKE_CASE_ ( self : int ): return self.label_list
710
'''simple docstring'''
from collections.abc import Callable

import numpy as np


def lowerCamelCase ( ode_func : Callable , ya : float , xa : float , x_end : float , step_size : float ) -> np.array:
    """Solve y' = f(x, y) with the modified-Euler (Heun) method.

    Bug fix: the identifier mangle dropped the subscripted assignment targets
    (``y[0] = ya`` and ``y[k + 1] = ...``), so the predictor/corrector values
    were computed but never stored and the function returned an array of
    zeros.  The targets are restored below.

    :param ode_func: right-hand side f(x, y) of the ODE
    :param ya:       initial value y(xa)
    :param xa:       start of the integration interval
    :param x_end:    end of the integration interval
    :param step_size: fixed step size h > 0
    :return: array of length n + 1 with the approximated solution values
    """
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # Predictor: forward-Euler estimate of y at x + h.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: trapezoidal average of the slopes at both ends.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
30
0
'''simple docstring'''
from math import sqrt

import numpy as np
from sympy import symbols


# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols: the symbolic coordinates of a four-vector (ct, x, y, z)
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Return v/c after validating 1 <= velocity <= c.

    Bug fix: all four functions in this module had been mangled to the same
    name ``lowerCamelCase`` while their bodies still called ``beta``/``gamma``/
    ``transformation_matrix`` — the original names are restored throughout.
    """
    if velocity > c:
        raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("""Speed must be greater than or equal to 1!""")
    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """4x4 Lorentz boost matrix along the x axis for the given velocity."""
    g = gamma(velocity)
    b = beta(velocity)
    return np.array(
        [
            [g, -g * b, 0, 0],
            [-g * b, g, 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """Boost *event*; defaults to the symbolic four-vector (ct, x, y, z).

    When a concrete event is given, its first component is interpreted as a
    time and scaled by c (x0 = ct).
    """
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector (moved under __main__ so importing the
    # module no longer prints or computes the example as a side effect):
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"""ct' = {four_vector[0]}""")
    print(f"""x' = {four_vector[1]}""")
    print(f"""y' = {four_vector[2]}""")
    print(f"""z' = {four_vector[3]}""")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"""\n{numerical_vector}""")
711
'''simple docstring'''
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


# Bug fix for the whole module: all seven factory functions had been mangled
# to a single name ``lowerCamelCase``, so each definition shadowed the one
# before it and only the last filter type remained callable.  The original
# distinct names are restored.  Coefficient formulas follow the standard
# biquad ("Audio EQ Cookbook") derivations; runtime behavior of each factory
# is unchanged.


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad for *frequency* at *samplerate*."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # Low-pass numerator is symmetric: b2 == b0.
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # High-pass numerator is symmetric: b2 == b0.
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad (constant skirt gain)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad (unity magnitude, phase shift only)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # All-pass: numerator is the reversed denominator.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a peaking-EQ biquad with *gain_db* of boost/cut."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a low-shelf biquad with *gain_db* of boost/cut below *frequency*."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Shared sub-expressions of the shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)

    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a high-shelf biquad with *gain_db* of boost/cut above *frequency*."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)

    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
30
0
'''simple docstring'''
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
# Bug fix: both sentinels were mangled to the same name ``_lowercase`` and
# the helper functions all to ``lowerCamelCase``, leaving ``_unmatched``,
# ``_match``, ``_replacement_rules`` and ``_get_partition_rules`` undefined
# at their call sites; original names are restored.  ``_match`` additionally
# had both parameters mangled to one name (SyntaxError).
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in *qs* fully match a window of *ks*."""
    # Compile each query and anchor it so the whole key component must match.
    qts = tuple(re.compile(x + """$""") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    """Build a (key, value) -> replacement function from (pattern, spec) pairs."""
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    """Partition specs for a GPT-style transformer over a 'mp' mesh axis."""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("""mp""", None)),
        (("transformer", "wte", "embedding"), P("""mp""", None)),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, """mp""")),
        (("attention", "out_proj", "kernel"), P("""mp""", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, """mp""")),
        (("mlp", "c_fc", "bias"), P("""mp""")),
        (("mlp", "c_proj", "kernel"), P("""mp""", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    """Map every parameter in *in_dict* to a PartitionSpec via the rules.

    Raises AssertionError if any parameter is left without a matching rule.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
712
'''simple docstring''' import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int _lowercase : str = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class __magic_name__ ( datasets.BuilderConfig): UpperCamelCase__ = None def lowerCamelCase ( UpperCAmelCase__ : "pyspark.sql.DataFrame" , UpperCAmelCase__ : List[int] , ) -> str: import pyspark def generate_fn(): lowercase_ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) ) for partition_id in partition_order: lowercase_ : int = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" ) lowercase_ : Any = partition_df.collect() lowercase_ : Dict = 0 for row in rows: yield F'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class __magic_name__ ( _BaseExamplesIterable): def __init__( self : int , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : Optional[int]=None , ): lowercase_ : Dict = df lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() ) lowercase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : List[Any] ): yield from self.generate_examples_fn() def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.random.Generator ): lowercase_ : str = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(lowercase_ ) return SparkExamplesIterable(self.df , partition_order=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , 
lowercase_ : int , lowercase_ : int ): lowercase_ : str = self.split_shard_indices_by_worker(lowercase_ , lowercase_ ) return SparkExamplesIterable(self.df , partition_order=lowercase_ ) @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): return len(self.partition_order ) class __magic_name__ ( datasets.DatasetBuilder): UpperCamelCase__ = SparkConfig def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ): import pyspark lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate() lowercase_ : Optional[int] = df lowercase_ : List[str] = working_dir super().__init__( cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , ) def SCREAMING_SNAKE_CASE_ ( self : str ): # Returns the path of the created file. def create_cache_and_write_probe(lowercase_ : str ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=lowercase_ ) lowercase_ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(lowercase_ , """a""" ) return [probe_file] if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: lowercase_ : str = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ): import pyspark def get_arrow_batch_size(lowercase_ : Any ): for batch in it: yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} ) lowercase_ : Union[str, Any] = self.df.count() lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. lowercase_ : Any = ( self.df.limit(lowercase_ ) .repartition(1 ) .mapInArrow(lowercase_ , """batch_bytes: long""" ) .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) ) .collect()[0] .sample_bytes / sample_num_rows ) lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. 
lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) ) lowercase_ : Any = self.df.repartition(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ): import pyspark lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath lowercase_ : Optional[Any] = file_format == """parquet""" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. lowercase_ : Tuple = self.config.features lowercase_ : Any = self._writer_batch_size lowercase_ : List[str] = self._fs.storage_options def write_arrow(lowercase_ : str ): # Within the same SparkContext, no two task attempts will share the same attempt ID. lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId() lowercase_ : Dict = next(lowercase_ , lowercase_ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) lowercase_ : int = 0 lowercase_ : List[Any] = writer_class( features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , ) lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] ) writer.write_table(lowercase_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: lowercase_ , lowercase_ : Dict = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) shard_id += 1 lowercase_ : Any = writer_class( features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , ) lowercase_ : List[str] = pa.Table.from_batches([batch] ) writer.write_table(lowercase_ ) if writer._num_bytes > 0: lowercase_ , lowercase_ : str = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(lowercase_ ) ): lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) ) shutil.move(lowercase_ , lowercase_ ) lowercase_ : Union[str, Any] = ( self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" ) .groupBy("""task_id""" ) .agg( pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" 
).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ): self._validate_cache_dir() lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(lowercase_ ) lowercase_ : Tuple = not is_remote_filesystem(self._fs ) lowercase_ : int = os.path.join if is_local else posixpath.join lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN""" lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ ) lowercase_ : Any = 0 lowercase_ : Tuple = 0 lowercase_ : int = 0 lowercase_ : Dict = [] lowercase_ : Union[str, Any] = [] for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ): ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Union[str, Any] = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(lowercase_ ) lowercase_ : List[str] = total_num_examples lowercase_ : int = total_num_bytes # should rename everything at the end logger.debug(f'''Renaming {total_shards} shards.''' ) if total_shards > 1: lowercase_ : Tuple = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. 
lowercase_ : Dict = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( lowercase_ : int , lowercase_ : int , lowercase_ : int , ): rename( lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , ) lowercase_ : Union[str, Any] = [] lowercase_ : Tuple = 0 for i in range(len(lowercase_ ) ): lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i] for shard_id in range(lowercase_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect() else: # don't use any pattern lowercase_ : List[str] = 0 lowercase_ : Optional[Any] = task_id_and_num_shards[0][0] self._rename( fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ): return SparkExamplesIterable(self.df )
30
0
"""ImageGPT model configuration (GPT-2-style) plus its ONNX export config."""

from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

# Checkpoint -> config-URL map (URLs intentionally empty in the original file).
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class __magic_name__(PretrainedConfig):
    """Stores the hyper-parameters of an ImageGPT model.

    NOTE(review): the obfuscated original declared duplicate parameter names
    (a SyntaxError) and assigned all three class attributes to one name; the
    parameter/attribute names below were reconstructed from the `self.` reads
    and the `PretrainedConfig` conventions — confirm against upstream.
    """

    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Maps the generic config attribute names onto the GPT-2 style field names.
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # 512 pixel clusters + 1 start-of-sequence token
        n_positions=32 * 32,  # one position per pixel of a 32x32 image
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,  # None -> the model picks its default inner dim
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class __magic_name__(OnnxConfig):  # noqa: F811 — shadows the config class above, as in the original source
    """ONNX export configuration for ImageGPT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Input axes exported to ONNX: dynamic batch and sequence dims."""
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Build dummy pixel inputs and run them through the preprocessor."""
        input_images = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_images, return_tensors=framework))
        return inputs
713
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowercase : Dict = { "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Dict = ["BloomTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Dict = [ "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST", "BloomForCausalLM", "BloomModel", "BloomPreTrainedModel", "BloomForSequenceClassification", "BloomForTokenClassification", "BloomForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys _lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
"""Dummy placeholder objects raised when flax + transformers are unavailable."""

from ..utils import DummyObject, requires_backends


# NOTE(review): all four placeholder classes share the name `__magic_name__`
# (each definition shadows the previous one), exactly as in the obfuscated
# source. Upstream each had a distinct pipeline name — confirm before renaming.
# The obfuscated original declared `*lowercase_, **lowercase_` (a duplicate
# parameter name, i.e. a SyntaxError); restored to `*args, **kwargs`.
class __magic_name__(metaclass=DummyObject):
    """Raises an informative ImportError unless flax and transformers are installed."""

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class __magic_name__(metaclass=DummyObject):  # noqa: F811
    """Raises an informative ImportError unless flax and transformers are installed."""

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class __magic_name__(metaclass=DummyObject):  # noqa: F811
    """Raises an informative ImportError unless flax and transformers are installed."""

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class __magic_name__(metaclass=DummyObject):  # noqa: F811
    """Raises an informative ImportError unless flax and transformers are installed."""

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
714
'''simple docstring''' _lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" def lowerCamelCase ( ) -> None: lowercase_ : List[Any] = input("""Enter message: """ ) lowercase_ : str = input("""Enter key [alphanumeric]: """ ) lowercase_ : List[Any] = input("""Encrypt/Decrypt [e/d]: """ ) if mode.lower().startswith("""e""" ): lowercase_ : List[str] = """encrypt""" lowercase_ : Optional[int] = encrypt_message(UpperCAmelCase__ , UpperCAmelCase__ ) elif mode.lower().startswith("""d""" ): lowercase_ : Any = """decrypt""" lowercase_ : Optional[Any] = decrypt_message(UpperCAmelCase__ , UpperCAmelCase__ ) print(F'''\n{mode.title()}ed message:''' ) print(UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str: return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """encrypt""" ) def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str: return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """decrypt""" ) def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str: lowercase_ : Union[str, Any] = [] lowercase_ : List[Any] = 0 lowercase_ : str = key.upper() for symbol in message: lowercase_ : Tuple = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(UpperCAmelCase__ ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(UpperCAmelCase__ ): lowercase_ : Any = 0 else: translated.append(UpperCAmelCase__ ) return "".join(UpperCAmelCase__ ) if __name__ == "__main__": main()
30
0
"""Nat (Neighborhood Attention Transformer) model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class __magic_name__(BackboneConfigMixin, PretrainedConfig):
    """Stores the hyper-parameters of a Nat model.

    NOTE(review): the obfuscated original used duplicate parameter names (a
    SyntaxError) and dead locals where `self.` assignments belong; names below
    were reconstructed from the attribute reads — confirm against upstream.
    """

    model_type = "nat"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],  # noqa: B006 — mutable default kept for interface compatibility
        num_heads=[2, 4, 8, 16],  # noqa: B006
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # hidden_size is the channel dimension after the last stage (the width
        # doubles at every stage); setting it lets Nat work with
        # VisionEncoderDecoderModel.
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
715
"""Unit tests for BarkProcessor: save/load round-trips and voice-preset handling.

NOTE(review): the obfuscated original gave every method the same name (so
unittest could discover none of them) and dropped the setUp values into
locals instead of `self` attributes; names below were reconstructed from the
attribute reads — confirm the exact test names against upstream.
"""

import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class __magic_name__(unittest.TestCase):
    def setUp(self):
        # Fixtures shared by every test; tmpdirname is removed in tearDown.
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        """Tokenizer matching the checkpoint under test."""
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        # NOTE(review): the boolean kwarg values were destroyed by obfuscation;
        # restored from the upstream Bark processor test — confirm.
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
0
"""Unit tests for DDIMParallelScheduler: config sweeps plus numeric regressions.

NOTE(review): the obfuscated original gave every test method the same name and
collapsed tuple unpacks (`num_inference_steps, eta = 10, 0.0`) into single
locals; names and obfuscated literal values (True/False flags) were
reconstructed from the diffusers scheduler-test conventions — confirm upstream.
"""

import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class __magic_name__(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        """Baseline scheduler config; keyword overrides are merged in."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        """Run a full 10-step denoising loop and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        samplea = self.dummy_sample_deter
        sampleb = self.dummy_sample_deter + 0.1
        samplec = self.dummy_sample_deter - 0.1

        per_sample_batch = samplea.shape[0]
        samples = torch.stack([samplea, sampleb, samplec], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=_UpperCAmelCase) class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = field(default='''image-classification''', metadata={'''include_in_asdict_even_if_is_default''': True}) UpperCamelCase__ = Features({'''image''': Image()}) UpperCamelCase__ = Features({'''labels''': ClassLabel}) UpperCamelCase__ = "image" UpperCamelCase__ = "labels" def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ): if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , lowercase_ ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) lowercase_ : List[str] = copy.deepcopy(self ) lowercase_ : List[str] = self.label_schema.copy() lowercase_ : List[Any] = features[self.label_column] lowercase_ : Optional[Any] = label_schema return task_template @property def SCREAMING_SNAKE_CASE_ ( self : int ): return { self.image_column: "image", self.label_column: "labels", }
30
0
'''simple docstring''' from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase ( UpperCAmelCase__ : int ) -> str: if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) lowercase_ : Optional[Any] = precision lowercase_ : List[Any] = ceil(precision / 14 ) lowercase_ : Optional[int] = 426880 * Decimal(10005 ).sqrt() lowercase_ : List[str] = 1 lowercase_ : List[Any] = 13591409 lowercase_ : Union[str, Any] = Decimal(UpperCAmelCase__ ) for k in range(1 , UpperCAmelCase__ ): lowercase_ : str = factorial(6 * k ) // (factorial(3 * k ) * factorial(UpperCAmelCase__ ) ** 3) linear_term += 545140134 exponential_term *= -262537412640768000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": _lowercase : int = 50 print(f"""The first {n} digits of pi is: {pi(n)}""")
717
"""Download class/regularization images for DreamBooth via the LAION knn service."""

import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    """Query laion_400m for *class_prompt* and save *num_class_images* images.

    Writes the images under ``{class_data_dir}/images`` plus caption/url/path
    manifests; returns early if enough images are already present.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the query size until enough candidates are returned (capped at 1e4).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Decode once to validate the payload is a real image.
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip any broken URL/image and move on.
                continue
    return


def parse_args():
    """CLI arguments for the retrieval script."""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
30
0
"""Project Euler style search over fraction triples x, y, z in (0, 1).

For every pair of reduced fractions x, y with denominators <= `order`, find z
satisfying x^n + y^n = z^n for n in {1, 2, -1, -2}, collect the unique reduced
values of x + y + z, and report numerator + denominator of their total.
"""

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """Return True iff *number* is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple:
    """Return (numerator, denominator) of x + y + z in lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """Sum the unique x + y + z values and return numerator + denominator."""
    unique_s = set()
    total = Fraction(0)

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n = 1: z = x + y
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n = 2: z^2 = x^2 + y^2 (z must come out rational)
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n = -1: 1/z = 1/x + 1/y
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n = -2: 1/z^2 = 1/x^2 + 1/y^2 (z must come out rational)
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"""{solution() = }""")
'''simple docstring''' from __future__ import annotations def lowerCamelCase ( UpperCAmelCase__ : list , UpperCAmelCase__ : int | None = None , UpperCAmelCase__ : int | None = None ) -> None: if start is None: lowercase_ : Any = 0 if end is None: lowercase_ : List[Any] = len(UpperCAmelCase__ ) - 1 if start >= end: return lowercase_ : Optional[int] = (start + end) // 2 slowsort(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) slowsort(UpperCAmelCase__ , mid + 1 , UpperCAmelCase__ ) if sequence[end] < sequence[mid]: lowercase_ , lowercase_ : Dict = sequence[mid], sequence[end] slowsort(UpperCAmelCase__ , UpperCAmelCase__ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
30
0
'''simple docstring''' from __future__ import annotations def lowerCamelCase ( UpperCAmelCase__ : list[int] , UpperCAmelCase__ : int ) -> list[int]: lowercase_ : str = 0 lowercase_ : int = len(UpperCAmelCase__ ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: lowercase_ : Dict = i + 1 else: lowercase_ : Union[str, Any] = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
719
"""Stable Diffusion inference on CPU with Intel IPEX bfloat16 optimizations.

NOTE(review): the obfuscated original bound every intermediate to the same
`_lowercase` name and then referenced `args`/`device`/`model_id`/`pipe`/
`image` etc., which were undefined; the names below were reconstructed from
those reads. `torch.bfloataa` was a mangling of `torch.bfloat16` — confirm.
"""

import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    # Newer IPEX releases accept a sample input for tracing.
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
30
0
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def lowerCamelCase ( UpperCAmelCase__ : Optional[int] ) -> Dict: lowercase_ : List[str] = filter(lambda UpperCAmelCase__ : p.requires_grad , model.parameters() ) lowercase_ : Dict = sum([np.prod(p.size() ) for p in model_parameters] ) return params _lowercase : Dict = logging.getLogger(__name__) def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]: if metric == "rouge2": lowercase_ : Optional[int] = """{val_avg_rouge2:.4f}-{step_count}""" elif metric == "bleu": lowercase_ : str = """{val_avg_bleu:.4f}-{step_count}""" elif metric == "em": lowercase_ : List[Any] = """{val_avg_em:.4f}-{step_count}""" elif metric == "loss": lowercase_ : List[str] = """{val_avg_loss:.4f}-{step_count}""" else: raise NotImplementedError( F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this''' """ function.""" ) lowercase_ : Dict = ModelCheckpoint( dirpath=UpperCAmelCase__ , filename=UpperCAmelCase__ , monitor=F'''val_{metric}''' , mode="""max""" , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] ) -> Tuple: return EarlyStopping( monitor=F'''val_{metric}''' , mode="""min""" if """loss""" in metric else """max""" , patience=UpperCAmelCase__ , verbose=UpperCAmelCase__ , ) class __magic_name__ ( pl.Callback): def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[Any] , lowercase_ : List[str] ): lowercase_ : str = {f'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(lowercase_ ) @rank_zero_only def SCREAMING_SNAKE_CASE_ ( self : 
Optional[Any] , lowercase_ : pl.Trainer , lowercase_ : pl.LightningModule , lowercase_ : str , lowercase_ : Dict=True ): logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' ) lowercase_ : List[str] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} ) # Log results lowercase_ : Any = Path(pl_module.hparams.output_dir ) if type_path == "test": lowercase_ : List[Any] = od / """test_results.txt""" lowercase_ : List[Any] = od / """test_generations.txt""" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. lowercase_ : Tuple = od / f'''{type_path}_results/{trainer.global_step:05d}.txt''' lowercase_ : str = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt''' results_file.parent.mkdir(exist_ok=lowercase_ ) generations_file.parent.mkdir(exist_ok=lowercase_ ) with open(lowercase_ , """a+""" ) as writer: for key in sorted(lowercase_ ): if key in ["log", "progress_bar", "preds"]: continue lowercase_ : List[str] = metrics[key] if isinstance(lowercase_ , torch.Tensor ): lowercase_ : Any = val.item() lowercase_ : Optional[int] = f'''{key}: {val:.6f}\n''' writer.write(lowercase_ ) if not save_generations: return if "preds" in metrics: lowercase_ : List[str] = """\n""".join(metrics["""preds"""] ) generations_file.open("""w+""" ).write(lowercase_ ) @rank_zero_only def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : str , lowercase_ : List[str] ): try: lowercase_ : List[Any] = pl_module.model.model.num_parameters() except AttributeError: lowercase_ : List[Any] = pl_module.model.num_parameters() lowercase_ : Optional[Any] = count_trainable_parameters(lowercase_ ) # mp stands for million parameters trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} ) 
@rank_zero_only def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : pl.Trainer , lowercase_ : pl.LightningModule ): save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(lowercase_ , lowercase_ , """test""" ) @rank_zero_only def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : pl.Trainer , lowercase_ : int ): save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
720
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _lowercase : Optional[Any] = { "configuration_swiftformer": [ "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwiftFormerConfig", "SwiftFormerOnnxConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = [ "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "SwiftFormerForImageClassification", "SwiftFormerModel", "SwiftFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys _lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets _lowercase : Optional[Any] = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n" _lowercase : int = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. 
See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n" _lowercase : str = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... 
\"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... 
case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class __magic_name__ ( datasets.Metric): def SCREAMING_SNAKE_CASE_ ( self : int ): if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ): raise ImportWarning( """To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n""" """You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[ """https://github.com/jhclark/tercom""", ] , ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , ): lowercase_ : Optional[int] = len(references[0] ) if any(len(lowercase_ ) != references_per_prediction for refs in references ): raise ValueError("""Sacrebleu requires the same number of references for each prediction""" ) lowercase_ : str = [[refs[i] for refs in references] for i in range(lowercase_ )] lowercase_ : int = TER( normalized=lowercase_ , no_punct=lowercase_ , asian_support=lowercase_ , case_sensitive=lowercase_ , ) lowercase_ : List[str] = sb_ter.corpus_score(lowercase_ , lowercase_ ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
721
'''simple docstring''' import unittest import numpy as np def lowerCamelCase ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray | None = None , ) -> np.ndarray: lowercase_ : List[Any] = np.shape(UpperCAmelCase__ ) lowercase_ : Dict = np.shape(UpperCAmelCase__ ) lowercase_ : int = np.shape(UpperCAmelCase__ ) if shape_a[0] != shape_b[0]: lowercase_ : Optional[int] = ( """Expected the same number of rows for A and B. """ F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(UpperCAmelCase__ ) if shape_b[1] != shape_c[1]: lowercase_ : Optional[Any] = ( """Expected the same number of columns for B and C. """ F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(UpperCAmelCase__ ) lowercase_ : Any = pseudo_inv if a_inv is None: try: lowercase_ : List[str] = np.linalg.inv(UpperCAmelCase__ ) except np.linalg.LinAlgError: raise ValueError( """Input matrix A is not invertible. 
Cannot compute Schur complement.""" ) return mat_c - mat_b.T @ a_inv @ mat_b class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Dict = np.array([[2, 1], [6, 3]] ) lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ ) lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] ) lowercase_ : Optional[int] = np.linalg.det(lowercase_ ) lowercase_ : int = np.linalg.det(lowercase_ ) lowercase_ : int = np.linalg.det(lowercase_ ) self.assertAlmostEqual(lowercase_ , det_a * det_s ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(lowercase_ ): schur_complement(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(lowercase_ ): schur_complement(lowercase_ , lowercase_ , lowercase_ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
30
0
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : list , _lowercase : int , _lowercase : int = 0 , _lowercase : int = 0 ) ->int: '''simple docstring''' a : int = right or len(_lowercase ) - 1 if left > right: return -1 elif list_data[left] == key: return left elif list_data[right] == key: return right else: return search(_lowercase , _lowercase , left + 1 , right - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
31
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : int = 10 , _lowercase : int = 1000 , _lowercase : bool = True ) ->int: '''simple docstring''' assert ( isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) ), "Invalid type of value(s) specified to function!" if min_val > max_val: raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" ) return min_val if option else max_val def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int ) ->int: '''simple docstring''' return int((number_a + number_a) / 2 ) def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int , _lowercase : int ) ->None: '''simple docstring''' assert ( isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) ), 'argument values must be type of "int"' if lower > higher: raise ValueError("argument value for lower and higher must be(lower > higher)" ) if not lower < to_guess < higher: raise ValueError( "guess value must be within the range of lower and higher value" ) def answer(_lowercase : int ) -> str: if number > to_guess: return "high" elif number < to_guess: return "low" else: return "same" print("started..." ) a : Optional[Any] = lower a : List[Any] = higher a : Tuple = [] while True: a : List[Any] = get_avg(_lowercase , _lowercase ) last_numbers.append(_lowercase ) if answer(_lowercase ) == "low": a : Optional[int] = number elif answer(_lowercase ) == "high": a : Tuple = number else: break print(F"""guess the number : {last_numbers[-1]}""" ) print(F"""details : {last_numbers!s}""" ) def _SCREAMING_SNAKE_CASE ( ) ->None: '''simple docstring''' a : Tuple = int(input("Enter lower value : " ).strip() ) a : Dict = int(input("Enter high value : " ).strip() ) a : Optional[int] = int(input("Enter value to guess : " ).strip() ) guess_the_number(_lowercase , _lowercase , _lowercase ) if __name__ == "__main__": main()
31
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Tuple = logging.get_logger(__name__) a : List[Any] = { '''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class __UpperCamelCase ( a__ ): lowerCamelCase : List[str] ="""poolformer""" def __init__( self , lowerCAmelCase__=3 , lowerCAmelCase__=16 , lowerCAmelCase__=16 , lowerCAmelCase__=3 , lowerCAmelCase__=4.0 , lowerCAmelCase__=[2, 2, 6, 2] , lowerCAmelCase__=[64, 128, 320, 512] , lowerCAmelCase__=[7, 3, 3, 3] , lowerCAmelCase__=[4, 2, 2, 2] , lowerCAmelCase__=[2, 1, 1, 1] , lowerCAmelCase__=4 , lowerCAmelCase__=0.0 , lowerCAmelCase__="gelu" , lowerCAmelCase__=True , lowerCAmelCase__=1E-5 , lowerCAmelCase__=0.02 , **lowerCAmelCase__ , ) -> int: a : Optional[int] = num_channels a : Union[str, Any] = patch_size a : List[Any] = stride a : int = padding a : str = pool_size a : Dict = hidden_sizes a : Dict = mlp_ratio a : Optional[int] = depths a : List[str] = patch_sizes a : Tuple = strides a : Optional[Any] = num_encoder_blocks a : Optional[int] = drop_path_rate a : Optional[Any] = hidden_act a : int = use_layer_scale a : List[str] = layer_scale_init_value a : Tuple = initializer_range super().__init__(**lowerCAmelCase__ ) class __UpperCamelCase ( a__ ): lowerCamelCase : Any =version.parse("""1.11""" ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def __a ( self ) -> float: return 2E-3
31
"""simple docstring""" import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort a : Any = logging.get_logger(__name__) a : Tuple = { '''tensor(bool)''': np.bool_, '''tensor(int8)''': np.inta, '''tensor(uint8)''': np.uinta, '''tensor(int16)''': np.intaa, '''tensor(uint16)''': np.uintaa, '''tensor(int32)''': np.intaa, '''tensor(uint32)''': np.uintaa, '''tensor(int64)''': np.intaa, '''tensor(uint64)''': np.uintaa, '''tensor(float16)''': np.floataa, '''tensor(float)''': np.floataa, '''tensor(double)''': np.floataa, } class __UpperCamelCase : def __init__( self , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> str: logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." ) a : Optional[int] = model a : int = kwargs.get("model_save_dir" , lowerCAmelCase__ ) a : Tuple = kwargs.get("latest_model_name" , lowerCAmelCase__ ) def __call__( self , **lowerCAmelCase__ ) -> Dict: a : List[str] = {k: np.array(lowerCAmelCase__ ) for k, v in kwargs.items()} return self.model.run(lowerCAmelCase__ , lowerCAmelCase__ ) @staticmethod def __a ( lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None ) -> Union[str, Any]: if provider is None: logger.info("No onnxruntime provider specified, using CPUExecutionProvider" ) a : List[str] = "CPUExecutionProvider" return ort.InferenceSession(lowerCAmelCase__ , providers=[provider] , sess_options=lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ ) -> int: a : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME a : Optional[int] = self.model_save_dir.joinpath(self.latest_model_name ) a : List[str] = Path(lowerCAmelCase__ ).joinpath(lowerCAmelCase__ ) try: shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ ) 
except shutil.SameFileError: pass # copy external weights (for models >2GB) a : str = self.model_save_dir.joinpath(lowerCAmelCase__ ) if src_path.exists(): a : Any = Path(lowerCAmelCase__ ).joinpath(lowerCAmelCase__ ) try: shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ ) except shutil.SameFileError: pass def __a ( self , lowerCAmelCase__ , **lowerCAmelCase__ , ) -> str: if os.path.isfile(lowerCAmelCase__ ): logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" ) return os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) # saving model weights/files self._save_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) @classmethod def __a ( cls , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Optional[int]: a : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(lowerCAmelCase__ ): a : Tuple = OnnxRuntimeModel.load_model( os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , provider=lowerCAmelCase__ , sess_options=lowerCAmelCase__ ) a : Tuple = Path(lowerCAmelCase__ ) # load model from hub else: # download model a : Optional[Any] = hf_hub_download( repo_id=lowerCAmelCase__ , filename=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , revision=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , ) a : Optional[int] = Path(lowerCAmelCase__ ).parent a : List[Any] = Path(lowerCAmelCase__ ).name a : int = OnnxRuntimeModel.load_model(lowerCAmelCase__ , provider=lowerCAmelCase__ , sess_options=lowerCAmelCase__ ) return cls(model=lowerCAmelCase__ , **lowerCAmelCase__ ) @classmethod def __a ( cls , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> List[str]: a : Any = None if 
len(str(lowerCAmelCase__ ).split("@" ) ) == 2: a, a : Tuple = model_id.split("@" ) return cls._from_pretrained( model_id=lowerCAmelCase__ , revision=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
31
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available a : int = { '''configuration_pix2struct''': [ '''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Pix2StructConfig''', '''Pix2StructTextConfig''', '''Pix2StructVisionConfig''', ], '''processing_pix2struct''': ['''Pix2StructProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = ['''Pix2StructImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : List[Any] = [ '''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Pix2StructPreTrainedModel''', '''Pix2StructForConditionalGeneration''', '''Pix2StructVisionModel''', '''Pix2StructTextModel''', ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
31
"""simple docstring""" import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : Optional[Any] ) ->str: '''simple docstring''' a : Union[str, Any] = 1.5 a : List[str] = int(factor * num_class_images ) a : Optional[Any] = ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=_lowercase , aesthetic_weight=0.1 ) os.makedirs(F"""{class_data_dir}/images""" , exist_ok=_lowercase ) if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images: return while True: a : List[Any] = client.query(text=_lowercase ) if len(_lowercase ) >= factor * num_class_images or num_images > 1E4: break else: a : Optional[int] = int(factor * num_images ) a : str = ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=_lowercase , aesthetic_weight=0.1 , ) a : Optional[int] = 0 a : str = 0 a : Any = tqdm(desc="downloading real regularization images" , total=_lowercase ) with open(F"""{class_data_dir}/caption.txt""" , "w" ) as fa, open(F"""{class_data_dir}/urls.txt""" , "w" ) as fa, open( F"""{class_data_dir}/images.txt""" , "w" ) as fa: while total < num_class_images: a : Optional[Any] = class_images[count] count += 1 try: a : str = requests.get(images["url"] ) if img.status_code == 200: a : int = Image.open(BytesIO(img.content ) ) with open(F"""{class_data_dir}/images/{total}.jpg""" , "wb" ) as f: f.write(img.content ) fa.write(images["caption"] + "\n" ) fa.write(images["url"] + "\n" ) fa.write(F"""{class_data_dir}/images/{total}.jpg""" + "\n" ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def _SCREAMING_SNAKE_CASE ( ) ->Dict: '''simple docstring''' a : Optional[int] = argparse.ArgumentParser("" , add_help=_lowercase ) 
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=_lowercase , type=_lowercase ) parser.add_argument("--class_data_dir" , help="path to save images" , required=_lowercase , type=_lowercase ) parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=_lowercase ) return parser.parse_args() if __name__ == "__main__": a : List[Any] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
31
1
"""simple docstring""" import random from .binary_exp_mod import bin_exp_mod def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] , _lowercase : Tuple=1000 ) ->Optional[int]: '''simple docstring''' if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd a : Tuple = n - 1 a : Any = 0 while d % 2 == 0: d /= 2 exp += 1 # n - 1=d*(2**exp) a : Dict = 0 while count < prec: a : Tuple = random.randint(2 , n - 1 ) a : List[Any] = bin_exp_mod(_lowercase , _lowercase , _lowercase ) if b != 1: a : int = True for _ in range(_lowercase ): if b == n - 1: a : str = False break a : List[Any] = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": a : Dict = abs(int(input('''Enter bound : ''').strip())) print('''Here\'s the list of primes:''') print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
31
"""`accelerate tpu-config`: run setup commands across all workers of a TPU pod VM."""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import subprocess

from packaging.version import Version, parse

from accelerate.commands.config.config_args import default_config_file, load_config_from_file


_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    """Build the argument parser for `accelerate tpu-config`.

    When `subparsers` is given (invocation through the main `accelerate` CLI),
    the parser is registered as the `tpu-config` sub-command; otherwise a
    standalone parser is created.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    """Resolve defaults from the accelerate config file, assemble the startup
    command string and run it on every worker of the pod via `gcloud ... ssh`.

    Raises:
        ValueError: if neither `--command` nor `--command_file` is provided
            (directly or through the config file).
    """
    defaults = None

    # Get the default from the config file if it exists.
    # BUG FIX: fall back to the *default config file path*, not the args object,
    # and only merge defaults when a config was actually loaded (otherwise
    # `defaults` is None and the attribute accesses below would crash).
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone

    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    # BUG FIX: the joined command string was previously dropped into a throwaway
    # variable, so the raw *list* ended up being passed to `gcloud --command`.
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        # Dry run: show what would be executed, do not touch the pod.
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    """Standalone entry point: parse CLI args and launch the TPU setup."""
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
31
1
"""Tests for the SentencePiece-based MarianTokenizer."""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available


if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

# Pick whichever framework tensor type is available for `return_tensors`.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        """Write a tiny mock vocab/config and SentencePiece models into tmpdir."""
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """`</s>` is id 0 in the mock vocab; check both conversion directions."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 3_8848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        # Truncation must cap the sequence at the model max length (512).
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
31
"""Fast (tokenizers-backed) tokenizer for the NLLB models."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]  # noqa: E501
# fmt: on


class NllbTokenizerFast(PreTrainedTokenizerFast):
    """Fast NLLB tokenizer (backed by HuggingFace *tokenizers*), based on BPE.

    The expected sequence format is `[src_lang_code] X [eos]` for source text
    and `[tgt_lang_code] X [eos]` for target text (no BOS is used), unless
    `legacy_behaviour=True`, in which case the language code is appended after
    the EOS token as in older releases.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # A slow tokenizer can only be re-created when the spm file is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        # Changing the source language re-installs the special-token template.
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Add the language-code prefix/suffix around a sequence (or pair)."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """NLLB does not use token type ids; return a list of zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline: encode inputs and set the forced BOS id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        # NOTE: the method (and its super() call) must be named
        # `prepare_seq2seq_batch` — the base class defines that name.
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to the source language setting.

        - In legacy mode: no prefix and suffix=[eos, src_lang_code].
        - Otherwise: prefix=[src_lang_code] and suffix=[eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        # Install the template on the backend tokenizer so encoding applies it.
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset special tokens to the target language setting (mirror of the above)."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece model into `save_directory` so a slow tokenizer can be rebuilt."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
31
1
"""Testing suite for the PyTorch ViT model."""
import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import (
    require_accelerate,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTModelTester:
    """Builds tiny ViT configs/inputs and runs shape checks for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViT
    does not use input_ids/attention_mask.
    """

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the COCO sample image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2_744, 0.8_215, -0.0_836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2_340, 4.3_906, -6.6_692], [4.5_463, 1.8_928, -6.7_257], [4.4_429, 0.8_496, -5.8_585]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        """A small test to make sure that inference works in half precision without any problem."""
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
31
"""Karras et al. (2022) stochastic sampler for the variance-expanding (VE) formulation."""
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step function.

    Attributes:
        prev_sample: computed sample x_{t-1} of the previous timestep.
        derivative: derivative of the predicted original sample x_0.
        pred_original_sample: the model's estimate of the denoised sample x_0.
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling from Karras et al. (2022), "Elucidating the Design
    Space of Diffusion-Based Generative Models" (Algorithm 2), for
    variance-expanding models.

    Args:
        sigma_min: minimum noise magnitude of the schedule.
        sigma_max: maximum noise magnitude of the schedule.
        s_noise: amount of additional noise to counteract loss of detail.
        s_churn: parameter controlling the overall amount of stochasticity.
        s_min: lowest sigma at which churn noise is added.
        s_max: highest sigma at which churn noise is added.
    """

    # A complete update is `step` followed by `step_correct` (2nd-order method).
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """Identity: this scheduler needs no input scaling; kept for API interchangeability."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Set the discrete timesteps (and geometric sigma schedule) used for inference."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        # Geometric interpolation from sigma_max down to sigma_min.
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        """Explicit Langevin-like "churn": add noise to move from sigma to a
        slightly higher sigma_hat (gamma > 0 only for sigma in [s_min, s_max]).
        """
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """Propagate the sample one (Euler) step along the probability-flow ODE."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """Second-order (Heun) correction of the prediction made in `step`."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        """Training-time noising is not defined for this scheduler."""
        raise NotImplementedError()
31
1
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( ) ->list[list[int]]: '''simple docstring''' return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] a : Any = generate_large_matrix() a : Optional[int] = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def _SCREAMING_SNAKE_CASE ( _lowercase : list[list[int]] ) ->None: '''simple docstring''' assert all(row == sorted(_lowercase , reverse=_lowercase ) for row in grid ) assert all(list(_lowercase ) == sorted(_lowercase , reverse=_lowercase ) for col in zip(*_lowercase ) ) def _SCREAMING_SNAKE_CASE ( _lowercase : list[int] ) ->int: '''simple docstring''' a : Optional[Any] = 0 a : List[Any] = len(_lowercase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: a : int = (left + right) // 2 a : Dict = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: a : Union[str, Any] = mid + 1 else: a : Dict = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. 
return len(_lowercase ) def _SCREAMING_SNAKE_CASE ( _lowercase : list[list[int]] ) ->int: '''simple docstring''' a : Optional[int] = 0 a : Tuple = len(grid[0] ) for i in range(len(_lowercase ) ): a : Union[str, Any] = find_negative_index(grid[i][:bound] ) total += bound return (len(_lowercase ) * len(grid[0] )) - total def _SCREAMING_SNAKE_CASE ( _lowercase : list[list[int]] ) ->int: '''simple docstring''' return len([number for row in grid for number in row if number < 0] ) def _SCREAMING_SNAKE_CASE ( _lowercase : list[list[int]] ) ->int: '''simple docstring''' a : str = 0 for row in grid: for i, number in enumerate(_lowercase ): if number < 0: total += len(_lowercase ) - i break return total def _SCREAMING_SNAKE_CASE ( ) ->None: '''simple docstring''' from timeit import timeit print("Running benchmarks" ) a : Union[str, Any] = ( "from __main__ import count_negatives_binary_search, " "count_negatives_brute_force, count_negatives_brute_force_with_break, grid" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): a : Optional[Any] = timeit(F"""{func}(grid=grid)""" , setup=_lowercase , number=500 ) print(F"""{func}() took {time:0.4f} seconds""" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
31
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal a : Optional[Any] = datasets.utils.logging.get_logger(__name__) a : Union[str, Any] = ['''names''', '''prefix'''] a : Any = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] a : Any = ['''encoding_errors''', '''on_bad_lines'''] a : List[str] = ['''date_format'''] @dataclass class __UpperCamelCase ( datasets.BuilderConfig ): lowerCamelCase : str ="," lowerCamelCase : Optional[str] =None lowerCamelCase : Optional[Union[int, List[int], str]] ="infer" lowerCamelCase : Optional[List[str]] =None lowerCamelCase : Optional[List[str]] =None lowerCamelCase : Optional[Union[int, str, List[int], List[str]]] =None lowerCamelCase : Optional[Union[List[int], List[str]]] =None lowerCamelCase : Optional[str] =None lowerCamelCase : bool =True lowerCamelCase : Optional[Literal["c", "python", "pyarrow"]] =None lowerCamelCase : Dict[Union[int, str], Callable[[Any], Any]] =None lowerCamelCase : Optional[list] =None lowerCamelCase : Optional[list] =None lowerCamelCase : bool =False lowerCamelCase : Optional[Union[int, List[int]]] =None lowerCamelCase : Optional[int] =None lowerCamelCase : Optional[Union[str, List[str]]] =None lowerCamelCase : bool =True lowerCamelCase : bool =True lowerCamelCase : bool =False lowerCamelCase : bool =True lowerCamelCase : Optional[str] =None lowerCamelCase : str ="." 
lowerCamelCase : Optional[str] =None lowerCamelCase : str ='"' lowerCamelCase : int =0 lowerCamelCase : Optional[str] =None lowerCamelCase : Optional[str] =None lowerCamelCase : Optional[str] =None lowerCamelCase : Optional[str] =None lowerCamelCase : bool =True lowerCamelCase : bool =True lowerCamelCase : int =0 lowerCamelCase : bool =True lowerCamelCase : bool =False lowerCamelCase : Optional[str] =None lowerCamelCase : int =1_0000 lowerCamelCase : Optional[datasets.Features] =None lowerCamelCase : Optional[str] ="strict" lowerCamelCase : Literal["error", "warn", "skip"] ="error" lowerCamelCase : Optional[str] =None def __a ( self ) -> Dict: if self.delimiter is not None: a : int = self.delimiter if self.column_names is not None: a : Any = self.column_names @property def __a ( self ) -> List[str]: a : Dict = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # 
some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class __UpperCamelCase ( datasets.ArrowBasedBuilder ): lowerCamelCase : Union[str, Any] =CsvConfig def __a ( self ) -> Optional[Any]: return datasets.DatasetInfo(features=self.config.features ) def __a ( self , lowerCAmelCase__ ) -> Optional[int]: if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) a : Optional[Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(lowerCAmelCase__ , (str, list, tuple) ): a : Tuple = data_files if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): a : Tuple = [files] a : int = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] a : int = [] for split_name, files in data_files.items(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): a : Any = [files] a : List[str] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) ) return splits def __a ( self , lowerCAmelCase__ ) -> 
pa.Table: if self.config.features is not None: a : Optional[Any] = self.config.features.arrow_schema if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ): # cheaper cast a : Dict = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example a : Union[str, Any] = table_cast(lowerCAmelCase__ , lowerCAmelCase__ ) return pa_table def __a ( self , lowerCAmelCase__ ) -> Any: a : Tuple = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str a : Any = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ): a : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(lowerCAmelCase__ ): a : Any = pa.Table.from_pandas(lowerCAmelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ ) except ValueError as e: logger.error(f"""Failed to read file '{file}' with error {type(lowerCAmelCase__ )}: {e}""" ) raise
31
1
"""simple docstring""" import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : List[Any] = get_tests_dir('''fixtures/spiece.model''') @require_sentencepiece @require_tokenizers class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : int =DebertaVaTokenizer lowerCamelCase : List[str] =DebertaVaTokenizerFast lowerCamelCase : Tuple =True lowerCamelCase : Any =True def __a ( self ) -> List[str]: super().setUp() # We have a SentencePiece fixture for testing a : List[str] = DebertaVaTokenizer(lowerCAmelCase__ , unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def __a ( self , lowerCAmelCase__ ) -> Tuple: a : Tuple = "this is a test" a : int = "this is a test" return input_text, output_text def __a ( self ) -> int: a : Any = "<pad>" a : Dict = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ ) def __a ( self ) -> List[str]: a : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "[PAD]" ) self.assertEqual(len(lowerCAmelCase__ ) , 3_0001 ) def __a ( self ) -> List[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 ) def __a ( self ) -> List[Any]: # fmt: off a : Tuple = " \tHeLLo!how \n Are yoU? 
" a : Union[str, Any] = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on a : List[str] = DebertaVaTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ ) a : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : List[str] = DebertaVaTokenizerFast(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ ) a : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def __a ( self ) -> List[Any]: pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def __a ( self ) -> List[str]: pass def __a ( self ) -> Any: # fmt: off a : Union[str, Any] = "I was born in 92000, and this is falsé." a : str = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on a : Tuple = DebertaVaTokenizer(lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ ) a : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : Dict = DebertaVaTokenizerFast(lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ ) a : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Any: # fmt: off a : Any = "I was born in 92000, and this is falsé." 
a : Any = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on a : Optional[int] = DebertaVaTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ ) a : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : List[Any] = DebertaVaTokenizerFast(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ ) a : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Union[str, Any]: # fmt: off a : Optional[int] = "I was born in 92000, and this is falsé." a : Dict = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on a : Tuple = DebertaVaTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ ) a : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : Dict = DebertaVaTokenizerFast(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ ) a : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Union[str, Any]: # fmt: off a : Dict = "I was born in 92000, and this is falsé." 
a : str = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on a : Optional[Any] = DebertaVaTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ ) a : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : str = DebertaVaTokenizerFast(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ ) a : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Dict: # fmt: off a : Dict = " \tHeLLo!how \n Are yoU? " a : Optional[Any] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on a : List[Any] = DebertaVaTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ ) a : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : List[str] = DebertaVaTokenizerFast(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ ) a : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> List[str]: a : List[str] = self.get_tokenizer() a : Optional[Any] = self.get_rust_tokenizer() a : Union[str, Any] = "I was born in 92000, and this is falsé." 
a : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) ) a : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : Tuple = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) a : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : Any = self.get_rust_tokenizer() a : Dict = tokenizer.encode(lowerCAmelCase__ ) a : Optional[int] = rust_tokenizer.encode(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Dict: a : Tuple = "This is a test" a : Optional[int] = [13, 1, 4398, 25, 21, 1289] a : Tuple = ["▁", "T", "his", "▁is", "▁a", "▁test"] a : Optional[Any] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] a : int = DebertaVaTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ ) a : Union[str, Any] = DebertaVaTokenizerFast(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ ) a : Optional[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : List[Any] = tokenizer.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : int = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : List[str] = rust_tokenizer.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : List[str] = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # fmt: off a : Dict = "I was born in 
92000, and this is falsé." a : Optional[Any] = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] a : Optional[int] = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] a : int = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on a : Optional[int] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : int = tokenizer.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : Any = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : Tuple = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : List[Any] = rust_tokenizer.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : str = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Tuple: a : Optional[int] = DebertaVaTokenizer(lowerCAmelCase__ ) a : Optional[Any] = tokenizer.encode("sequence builders" ) a : int = tokenizer.encode("multi-sequence build" ) a : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ ) a : Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , lowerCAmelCase__ ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , lowerCAmelCase__ , ) @slow def __a ( self ) -> List[str]: # fmt: off a : Any = {"input_ids": [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 
191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__ , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
31
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class __UpperCamelCase ( a__ , a__ , unittest.TestCase ): lowerCamelCase : Dict =IFPipeline lowerCamelCase : int =TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""} lowerCamelCase : int =TEXT_TO_IMAGE_BATCH_PARAMS lowerCamelCase : int =PipelineTesterMixin.required_optional_params - {"""latents"""} def __a ( self ) -> List[str]: return self._get_dummy_components() def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Dict: if str(lowerCAmelCase__ ).startswith("mps" ): a : Tuple = torch.manual_seed(lowerCAmelCase__ ) else: a : int = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) a : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def __a ( self ) -> Union[str, Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def __a ( self ) -> Any: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __a ( self ) -> Union[str, Any]: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __a ( self ) -> Optional[int]: 
self._test_save_load_local() def __a ( self ) -> Tuple: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __a ( self ) -> str: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def __a ( self ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self ) -> Tuple: # if a : Tuple = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa ) a : str = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("cuda" ) a, a : List[str] = pipe_a.encode_prompt("anime turtle" , device="cuda" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() a : Optional[int] = None a : Optional[int] = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img a : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components ) a : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting a 
: Union[str, Any] = IFInpaintingPipeline(**pipe_a.components ) a : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: # pipeline 1 _start_torch_memory_measurement() a : List[str] = torch.Generator(device="cpu" ).manual_seed(0 ) a : Dict = pipe_a( prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (64, 64, 3) a : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 a : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) # pipeline 2 _start_torch_memory_measurement() a : List[str] = torch.Generator(device="cpu" ).manual_seed(0 ) a : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : Union[str, Any] = pipe_a( prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (256, 256, 3) a : int = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 a : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , 
lowerCAmelCase__ ) -> int: # pipeline 1 _start_torch_memory_measurement() a : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : Tuple = torch.Generator(device="cpu" ).manual_seed(0 ) a : List[Any] = pipe_a( prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , ) a : Tuple = output.images[0] assert image.shape == (64, 64, 3) a : int = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 a : Optional[int] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) # pipeline 2 _start_torch_memory_measurement() a : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) a : List[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : Dict = pipe_a( prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , original_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , ) a : int = output.images[0] assert image.shape == (256, 256, 3) a : str = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 a : Any = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: # pipeline 1 _start_torch_memory_measurement() a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowerCAmelCase__ ) a : 
Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) a : List[str] = pipe_a( prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , ) a : List[Any] = output.images[0] assert image.shape == (64, 64, 3) a : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 a : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) # pipeline 2 _start_torch_memory_measurement() a : str = torch.Generator(device="cpu" ).manual_seed(0 ) a : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : Dict = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(lowerCAmelCase__ ) a : Optional[int] = pipe_a( prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , original_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (256, 256, 3) a : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 a : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( ) ->List[str]: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
31
1
"""simple docstring""" import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( ) ->Tuple: '''simple docstring''' a : str = 10 a : Union[str, Any] = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string" ) ), "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ), "answers": datasets.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), "id": datasets.Value("int64" ), } ) a : Any = datasets.Dataset.from_dict( { "tokens": [["foo"] * 5] * n, "labels": [[1] * 5] * n, "answers": [{"answer_start": [97], "text": ["1976"]}] * 10, "id": list(range(_lowercase ) ), } , features=_lowercase , ) return dataset @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : Tuple ) ->Union[str, Any]: '''simple docstring''' a : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "file.arrow" ) dataset.map(cache_file_name=_lowercase ) return filename # FILE_CONTENT + files a : str = '''\ Text data. 
Second line of data.''' @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->Tuple: '''simple docstring''' a : Dict = tmp_path_factory.mktemp("data" ) / "file.txt" a : str = FILE_CONTENT with open(_lowercase , "w" ) as f: f.write(_lowercase ) return filename @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Dict ) ->str: '''simple docstring''' import bza a : Tuple = tmp_path_factory.mktemp("data" ) / "file.txt.bz2" a : Union[str, Any] = bytes(_lowercase , "utf-8" ) with bza.open(_lowercase , "wb" ) as f: f.write(_lowercase ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->Optional[int]: '''simple docstring''' import gzip a : List[Any] = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" ) a : List[str] = bytes(_lowercase , "utf-8" ) with gzip.open(_lowercase , "wb" ) as f: f.write(_lowercase ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->Any: '''simple docstring''' if datasets.config.LZ4_AVAILABLE: import lza.frame a : Any = tmp_path_factory.mktemp("data" ) / "file.txt.lz4" a : int = bytes(_lowercase , "utf-8" ) with lza.frame.open(_lowercase , "wb" ) as f: f.write(_lowercase ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Dict , _lowercase : Any ) ->Optional[int]: '''simple docstring''' if datasets.config.PY7ZR_AVAILABLE: import pyazr a : Any = tmp_path_factory.mktemp("data" ) / "file.txt.7z" with pyazr.SevenZipFile(_lowercase , "w" ) as archive: archive.write(_lowercase , arcname=os.path.basename(_lowercase ) ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] , _lowercase : List[Any] ) ->int: '''simple docstring''' import tarfile a : List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.tar" with tarfile.TarFile(_lowercase , "w" ) as f: f.add(_lowercase , arcname=os.path.basename(_lowercase ) ) return path 
@pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Any ) ->List[str]: '''simple docstring''' import lzma a : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.xz" a : Dict = bytes(_lowercase , "utf-8" ) with lzma.open(_lowercase , "wb" ) as f: f.write(_lowercase ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple , _lowercase : Optional[int] ) ->str: '''simple docstring''' import zipfile a : Any = tmp_path_factory.mktemp("data" ) / "file.txt.zip" with zipfile.ZipFile(_lowercase , "w" ) as f: f.write(_lowercase , arcname=os.path.basename(_lowercase ) ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : int ) ->Dict: '''simple docstring''' if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd a : List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.zst" a : Any = bytes(_lowercase , "utf-8" ) with zstd.open(_lowercase , "wb" ) as f: f.write(_lowercase ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->List[str]: '''simple docstring''' a : Optional[Any] = tmp_path_factory.mktemp("data" ) / "file.xml" a : List[Any] = textwrap.dedent( "\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" ) with open(_lowercase , "w" ) as f: 
f.write(_lowercase ) return filename a : Union[str, Any] = [ {'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0}, {'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0}, {'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0}, {'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0}, ] a : str = [ {'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0}, {'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0}, ] a : Tuple = { '''col_1''': ['''0''', '''1''', '''2''', '''3'''], '''col_2''': [0, 1, 2, 3], '''col_3''': [0.0, 1.0, 2.0, 3.0], } a : Any = [ {'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0}, {'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1}, ] a : Dict = [ {'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0}, {'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0}, {'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0}, {'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0}, ] @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( ) ->Any: '''simple docstring''' return DATA_DICT_OF_LISTS @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->Union[str, Any]: '''simple docstring''' a : List[str] = datasets.Dataset.from_dict(_lowercase ) a : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" ) dataset.map(cache_file_name=_lowercase ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] ) ->List[str]: '''simple docstring''' a : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" ) with contextlib.closing(sqlitea.connect(_lowercase ) ) as con: a : List[str] = con.cursor() cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" ) for item in DATA: cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] ) ->int: '''simple 
docstring''' a : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" ) with open(_lowercase , "w" , newline="" ) as f: a : Optional[Any] = csv.DictWriter(_lowercase , fieldnames=["col_1", "col_2", "col_3"] ) writer.writeheader() for item in DATA: writer.writerow(_lowercase ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Any ) ->str: '''simple docstring''' a : List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" ) with open(_lowercase , "w" , newline="" ) as f: a : List[str] = csv.DictWriter(_lowercase , fieldnames=["col_1", "col_2", "col_3"] ) writer.writeheader() for item in DATA: writer.writerow(_lowercase ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple , _lowercase : List[Any] ) ->str: '''simple docstring''' import bza a : Any = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2" with open(_lowercase , "rb" ) as f: a : Tuple = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(_lowercase , "wb" ) as f: f.write(_lowercase ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] , _lowercase : Optional[int] , _lowercase : Dict ) ->List[Any]: '''simple docstring''' a : str = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip" with zipfile.ZipFile(_lowercase , "w" ) as f: f.write(_lowercase , arcname=os.path.basename(_lowercase ) ) f.write(_lowercase , arcname=os.path.basename(_lowercase ) ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] , _lowercase : Any , _lowercase : Dict ) ->Any: '''simple docstring''' a : Any = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip" with zipfile.ZipFile(_lowercase , "w" ) as f: f.write(_lowercase , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) ) f.write(_lowercase , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE 
( _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : Any ) ->Any: '''simple docstring''' a : Optional[Any] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip" with zipfile.ZipFile(_lowercase , "w" ) as f: f.write(_lowercase , arcname=os.path.join("main_dir" , os.path.basename(_lowercase ) ) ) f.write(_lowercase , arcname=os.path.join("main_dir" , os.path.basename(_lowercase ) ) ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->Tuple: '''simple docstring''' a : int = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" ) a : List[str] = pa.schema( { "col_1": pa.string(), "col_2": pa.intaa(), "col_3": pa.floataa(), } ) with open(_lowercase , "wb" ) as f: a : List[Any] = pq.ParquetWriter(_lowercase , schema=_lowercase ) a : str = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_lowercase ) )] for k in DATA[0]} , schema=_lowercase ) writer.write_table(_lowercase ) writer.close() return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] ) ->Dict: '''simple docstring''' a : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" ) a : List[str] = {"data": DATA} with open(_lowercase , "w" ) as f: json.dump(_lowercase , _lowercase ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->List[str]: '''simple docstring''' a : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" ) a : Dict = {"data": DATA_DICT_OF_LISTS} with open(_lowercase , "w" ) as f: json.dump(_lowercase , _lowercase ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Dict ) ->List[str]: '''simple docstring''' a : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" ) with open(_lowercase , "w" ) as f: for item in DATA: f.write(json.dumps(_lowercase ) + "\n" ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Any ) ->Any: 
'''simple docstring''' a : Any = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" ) with open(_lowercase , "w" ) as f: for item in DATA: f.write(json.dumps(_lowercase ) + "\n" ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple ) ->Optional[Any]: '''simple docstring''' a : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" ) with open(_lowercase , "w" ) as f: for item in DATA_312: f.write(json.dumps(_lowercase ) + "\n" ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Any ) ->Any: '''simple docstring''' a : List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" ) with open(_lowercase , "w" ) as f: for item in DATA_STR: f.write(json.dumps(_lowercase ) + "\n" ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Dict , _lowercase : List[Any] ) ->Union[str, Any]: '''simple docstring''' import gzip a : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" ) with open(_lowercase , "rb" ) as orig_file: with gzip.open(_lowercase , "wb" ) as zipped_file: zipped_file.writelines(_lowercase ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : Dict ) ->List[str]: '''simple docstring''' import gzip a : str = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" ) with open(_lowercase , "rb" ) as orig_file: with gzip.open(_lowercase , "wb" ) as zipped_file: zipped_file.writelines(_lowercase ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] , _lowercase : List[str] , _lowercase : List[Any] ) ->str: '''simple docstring''' a : str = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip" with zipfile.ZipFile(_lowercase , "w" ) as f: f.write(_lowercase , arcname=os.path.basename(_lowercase ) ) f.write(_lowercase , arcname=os.path.basename(_lowercase ) ) return path @pytest.fixture(scope="session" 
) def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : List[str] , _lowercase : int , _lowercase : Tuple ) ->Union[str, Any]: '''simple docstring''' a : int = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip" with zipfile.ZipFile(_lowercase , "w" ) as f: f.write(_lowercase , arcname=os.path.join("nested" , os.path.basename(_lowercase ) ) ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : Dict , _lowercase : Optional[int] ) ->List[str]: '''simple docstring''' a : Any = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip" with zipfile.ZipFile(_lowercase , "w" ) as f: f.write(_lowercase , arcname=os.path.join("main_dir" , os.path.basename(_lowercase ) ) ) f.write(_lowercase , arcname=os.path.join("main_dir" , os.path.basename(_lowercase ) ) ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Dict , _lowercase : Optional[int] , _lowercase : Tuple ) ->Optional[Any]: '''simple docstring''' a : List[str] = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar" with tarfile.TarFile(_lowercase , "w" ) as f: f.add(_lowercase , arcname=os.path.basename(_lowercase ) ) f.add(_lowercase , arcname=os.path.basename(_lowercase ) ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Dict , _lowercase : Tuple , _lowercase : Tuple , _lowercase : Tuple ) ->Union[str, Any]: '''simple docstring''' a : Any = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar" with tarfile.TarFile(_lowercase , "w" ) as f: f.add(_lowercase , arcname=os.path.join("nested" , os.path.basename(_lowercase ) ) ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->str: '''simple docstring''' a : List[Any] = ["0", "1", "2", "3"] a : int = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" ) with open(_lowercase , "w" ) as f: for item in data: f.write(item + "\n" ) return path 
@pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : int ) ->str: '''simple docstring''' a : int = ["0", "1", "2", "3"] a : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" ) with open(_lowercase , "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->str: '''simple docstring''' a : List[Any] = ["0", "1", "2", "3"] a : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.abc" with open(_lowercase , "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : List[str] , _lowercase : Any ) ->int: '''simple docstring''' a : Optional[Any] = tmp_path_factory.mktemp("data" ) / "dataset.text.zip" with zipfile.ZipFile(_lowercase , "w" ) as f: f.write(_lowercase , arcname=os.path.basename(_lowercase ) ) f.write(_lowercase , arcname=os.path.basename(_lowercase ) ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : str , _lowercase : Dict ) ->List[str]: '''simple docstring''' a : List[str] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip" with zipfile.ZipFile(_lowercase , "w" ) as f: f.write(_lowercase , arcname=os.path.join("main_dir" , os.path.basename(_lowercase ) ) ) f.write(_lowercase , arcname=os.path.join("main_dir" , os.path.basename(_lowercase ) ) ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : Dict , _lowercase : str ) ->Tuple: '''simple docstring''' a : str = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip" with zipfile.ZipFile(_lowercase , "w" ) as f: f.write(_lowercase , arcname=os.path.basename("unsupported.ext" ) ) f.write(_lowercase , arcname=os.path.basename("unsupported_2.ext" ) ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] ) 
->Union[str, Any]: '''simple docstring''' a : List[Any] = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] ) a : List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" ) with open(_lowercase , "w" , encoding="utf-8" ) as f: f.write(_lowercase ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( ) ->int: '''simple docstring''' return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" ) @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( ) ->str: '''simple docstring''' return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" ) @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] , _lowercase : int ) ->List[str]: '''simple docstring''' a : List[str] = tmp_path_factory.mktemp("data" ) / "dataset.img.zip" with zipfile.ZipFile(_lowercase , "w" ) as f: f.write(_lowercase , arcname=os.path.basename(_lowercase ) ) f.write(_lowercase , arcname=os.path.basename(_lowercase ).replace(".jpg" , "2.jpg" ) ) return path @pytest.fixture(scope="session" ) def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->int: '''simple docstring''' a : str = tmp_path_factory.mktemp("data_dir" ) (data_dir / "subdir").mkdir() with open(data_dir / "subdir" / "train.txt" , "w" ) as f: f.write("foo\n" * 10 ) with open(data_dir / "subdir" / "test.txt" , "w" ) as f: f.write("bar\n" * 10 ) # hidden file with open(data_dir / "subdir" / ".test.txt" , "w" ) as f: f.write("bar\n" * 10 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / ".subdir" / "train.txt" , "w" ) as f: f.write("foo\n" * 10 ) with open(data_dir / ".subdir" / "test.txt" , "w" ) as f: f.write("bar\n" * 10 ) return data_dir
31
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __UpperCamelCase ( unittest.TestCase ): def __a ( self ) -> Optional[Any]: a : Optional[int] = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) ) def __a ( self ) -> Optional[Any]: a : str = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) ) def __a ( self ) -> Dict: a : List[str] = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) ) def __a ( self ) -> List[Any]: a : Optional[Any] = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) ) def __a ( self ) -> Tuple: a : Tuple = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) ) def __a ( self ) -> Dict: a : Dict = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", 
"text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] a : Dict = "fp16" self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def __a ( self ) -> List[str]: a : List[Any] = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] a : Any = "fp16" self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def __a ( self ) -> int: # pass variant but use the non-variant filenames a : int = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] a : Tuple = "fp16" self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def __a ( self ) -> str: a : str = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] a : Any = "fp16" self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def __a ( self ) -> str: a : Union[str, Any] = [ "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", ] a : str = "fp16" self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def __a ( self ) -> List[str]: # pass variant but use the non-variant filenames a : Optional[int] = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] a : str = "fp16" self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def __a ( self ) -> Optional[Any]: a : Any = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", 
"vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", # 'text_encoder/model.fp16.safetensors', "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] a : Optional[int] = "fp16" self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
31
1
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] , _lowercase : List[str] , _lowercase : Union[str, Any] , _lowercase : List[str] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] ) ->int: '''simple docstring''' if index == r: for j in range(_lowercase ): print(data[j] , end=" " ) print(" " ) return # When no more elements are there to put in data[] if i >= n: return # current is included, put next at next location a : Any = arr[i] combination_util(_lowercase , _lowercase , _lowercase , index + 1 , _lowercase , i + 1 ) # current is excluded, replace it with # next (Note that i+1 is passed, but # index is not changed) combination_util(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , i + 1 ) # The main function that prints all combinations # of size r in arr[] of size n. This function # mainly uses combinationUtil() def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : Tuple , _lowercase : List[Any] ) ->Optional[Any]: '''simple docstring''' a : Tuple = [0] * r # Print all combination using temporary array 'data[]' combination_util(_lowercase , _lowercase , _lowercase , 0 , _lowercase , 0 ) if __name__ == "__main__": # Driver code to check the function above a : Union[str, Any] = [10, 20, 30, 40, 50] print_combination(arr, len(arr), 3) # This code is contributed by Ambuj sahu
31
"""simple docstring""" import flax.linen as nn import jax import jax.numpy as jnp class __UpperCamelCase ( nn.Module ): lowerCamelCase : int lowerCamelCase : jnp.dtype =jnp.floataa def __a ( self ) -> Tuple: a : str = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , lowerCAmelCase__ ) -> Optional[Any]: a, a, a, a : List[str] = hidden_states.shape a : List[Any] = jax.image.resize( lowerCAmelCase__ , shape=(batch, height * 2, width * 2, channels) , method="nearest" , ) a : List[str] = self.conv(lowerCAmelCase__ ) return hidden_states class __UpperCamelCase ( nn.Module ): lowerCamelCase : int lowerCamelCase : jnp.dtype =jnp.floataa def __a ( self ) -> Dict: a : Optional[Any] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , lowerCAmelCase__ ) -> Tuple: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) a : Tuple = self.conv(lowerCAmelCase__ ) return hidden_states class __UpperCamelCase ( nn.Module ): lowerCamelCase : int lowerCamelCase : int =None lowerCamelCase : float =0.0 lowerCamelCase : bool =None lowerCamelCase : jnp.dtype =jnp.floataa def __a ( self ) -> int: a : Dict = self.in_channels if self.out_channels is None else self.out_channels a : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) a : List[Any] = nn.Conv( lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) a : List[Any] = nn.Dense(lowerCAmelCase__ , dtype=self.dtype ) a : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) a : Optional[int] = nn.Dropout(self.dropout_prob ) a : Dict = nn.Conv( lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) a : Union[str, Any] = self.in_channels != out_channels if self.use_nin_shortcut is None 
else self.use_nin_shortcut a : List[str] = None if use_nin_shortcut: a : Optional[Any] = nn.Conv( lowerCAmelCase__ , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , ) def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=True ) -> str: a : int = hidden_states a : Tuple = self.norma(lowerCAmelCase__ ) a : Any = nn.swish(lowerCAmelCase__ ) a : int = self.conva(lowerCAmelCase__ ) a : int = self.time_emb_proj(nn.swish(lowerCAmelCase__ ) ) a : Tuple = jnp.expand_dims(jnp.expand_dims(lowerCAmelCase__ , 1 ) , 1 ) a : Dict = hidden_states + temb a : str = self.norma(lowerCAmelCase__ ) a : List[Any] = nn.swish(lowerCAmelCase__ ) a : List[str] = self.dropout(lowerCAmelCase__ , lowerCAmelCase__ ) a : List[str] = self.conva(lowerCAmelCase__ ) if self.conv_shortcut is not None: a : Tuple = self.conv_shortcut(lowerCAmelCase__ ) return hidden_states + residual
31
1
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : int ) ->int: '''simple docstring''' if not isinstance(_lowercase , _lowercase ): raise TypeError("Input value must be an 'int' type" ) a : List[Any] = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
31
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def _SCREAMING_SNAKE_CASE ( _lowercase : str=None ) ->Optional[Any]: '''simple docstring''' if subparsers is not None: a : Dict = subparsers.add_parser("test" ) else: a : Tuple = argparse.ArgumentParser("Accelerate test command" ) parser.add_argument( "--config_file" , default=_lowercase , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , ) if subparsers is not None: parser.set_defaults(func=_lowercase ) return parser def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->str: '''simple docstring''' a : List[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] ) if args.config_file is None: a : int = script_name else: a : int = F"""--config_file={args.config_file} {script_name}""" a : Optional[int] = ["accelerate-launch"] + test_args.split() a : Optional[int] = execute_subprocess_async(_lowercase , env=os.environ.copy() ) if result.returncode == 0: print("Test is a success! 
You are ready for your distributed training!" ) def _SCREAMING_SNAKE_CASE ( ) ->Tuple: '''simple docstring''' a : Any = test_command_parser() a : Union[str, Any] = parser.parse_args() test_command(_lowercase ) if __name__ == "__main__": main()
31
1
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class __UpperCamelCase : lowerCamelCase : Optional[Any] =MBartConfig lowerCamelCase : Optional[Any] ={} lowerCamelCase : List[str] ="""gelu""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=20 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , ) -> str: a : str = parent a : int = batch_size a : Any = seq_length a : Union[str, Any] = is_training a : List[Any] = use_labels a : int = vocab_size a : Union[str, Any] = hidden_size a : List[str] = num_hidden_layers a : Optional[Any] = num_attention_heads a : List[str] = intermediate_size a : Union[str, Any] = hidden_dropout_prob a : Any = attention_probs_dropout_prob a : List[str] = max_position_embeddings a : Tuple = eos_token_id a : Dict = pad_token_id a : List[str] = bos_token_id def __a ( self ) -> Optional[int]: a : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) a : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) a : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 ) a : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a : Optional[Any] = 
self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) a : int = prepare_mbart_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) return config, inputs_dict def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any: a : Tuple = TFMBartModel(config=lowerCAmelCase__ ).get_decoder() a : Tuple = inputs_dict["input_ids"] a : List[Any] = input_ids[:1, :] a : List[Any] = inputs_dict["attention_mask"][:1, :] a : Tuple = inputs_dict["head_mask"] a : Tuple = 1 # first forward pass a : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , head_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ ) a, a : str = outputs.to_tuple() a : Dict = past_key_values[1] def _SCREAMING_SNAKE_CASE ( _lowercase : Dict , _lowercase : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[Any]=None , _lowercase : Optional[Any]=None , _lowercase : str=None , _lowercase : Dict=None , _lowercase : Tuple=None , ) ->Any: '''simple docstring''' if attention_mask is None: a : List[Any] = tf.cast(tf.math.not_equal(_lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: a : str = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: a : Union[str, Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if 
decoder_head_mask is None: a : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: a : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __UpperCamelCase ( a__ , a__ , unittest.TestCase ): lowerCamelCase : Dict =(TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () lowerCamelCase : List[str] =(TFMBartForConditionalGeneration,) if is_tf_available() else () lowerCamelCase : Dict =( { """conversational""": TFMBartForConditionalGeneration, """feature-extraction""": TFMBartModel, """summarization""": TFMBartForConditionalGeneration, """text2text-generation""": TFMBartForConditionalGeneration, """translation""": TFMBartForConditionalGeneration, } if is_tf_available() else {} ) lowerCamelCase : List[Any] =True lowerCamelCase : Union[str, Any] =False lowerCamelCase : Tuple =False def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def __a ( self ) -> List[Any]: a : List[Any] = TFMBartModelTester(self ) a : str = ConfigTester(self , config_class=lowerCAmelCase__ ) def __a ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def __a ( self ) -> int: a : Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase__ ) @require_sentencepiece @require_tokenizers @require_tf class __UpperCamelCase ( unittest.TestCase ): lowerCamelCase : Optional[Any] =[ """ UN Chief Says There Is No Military Solution in Syria""", ] lowerCamelCase : Tuple =[ """Şeful ONU declară că nu există o soluţie militară în Siria""", ] lowerCamelCase : Dict ="""facebook/mbart-large-en-ro""" @cached_property def __a ( self ) -> Tuple: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def __a ( self ) -> Any: a : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def __a ( self , **lowerCAmelCase__ ) -> List[str]: a : Optional[int] = self.translate_src_text(**lowerCAmelCase__ ) self.assertListEqual(self.expected_text , lowerCAmelCase__ ) def __a ( self , **lowerCAmelCase__ ) -> Dict: a : Union[str, Any] = self.tokenizer(self.src_text , **lowerCAmelCase__ , return_tensors="tf" ) a : Optional[int] = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) a : Optional[Any] = self.tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) return generated_words @slow def __a ( self ) -> int: self._assert_generated_batch_equal_expected()
31
"""simple docstring""" a : str = 8.314_4598 def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float ) ->float: '''simple docstring''' if temperature < 0: raise Exception("Temperature cannot be less than 0 K" ) if molar_mass <= 0: raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" ) else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example a : Any = 300 a : Dict = 28 a : Dict = rms_speed_of_molecule(temperature, molar_mass) print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
31
1
"""simple docstring""" from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 a : Any = { # 1536-bit 5: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 2048-bit 14: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 3072-bit 15: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + 
'''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 4096-bit 16: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199''' + '''FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 6144-bit 17: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08''' + '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B''' + 
'''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9''' + '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6''' + '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8''' + '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C''' + '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718''' + '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D''' + '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D''' + '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226''' + '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC''' + '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26''' + '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB''' + '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2''' + '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127''' + '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406''' + '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918''' + '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151''' + '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03''' + '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F''' + '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B''' + '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632''' + '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E''' + '''6DCC4024FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 8192-bit 18: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + 
'''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD''' + '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831''' + '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B''' + '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF''' + '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6''' + '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3''' + '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328''' + '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C''' + '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE''' + '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4''' + '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300''' + '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568''' + '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9''' + '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B''' + '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A''' + '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36''' + '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1''' + 
'''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92''' + '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47''' + '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71''' + '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, } class __UpperCamelCase : def __init__( self , lowerCAmelCase__ = 14 ) -> None: if group not in primes: raise ValueError("Unsupported Group" ) a : List[Any] = primes[group]["prime"] a : List[Any] = primes[group]["generator"] a : List[Any] = int(hexlify(urandom(32 ) ) , base=16 ) def __a ( self ) -> str: return hex(self.__private_key )[2:] def __a ( self ) -> str: a : Optional[int] = pow(self.generator , self.__private_key , self.prime ) return hex(lowerCAmelCase__ )[2:] def __a ( self , lowerCAmelCase__ ) -> bool: # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(lowerCAmelCase__ , (self.prime - 1) // 2 , self.prime ) == 1 ) def __a ( self , lowerCAmelCase__ ) -> str: a : Optional[int] = int(lowerCAmelCase__ , base=16 ) if not self.is_valid_public_key(lowerCAmelCase__ ): raise ValueError("Invalid public key" ) a : Optional[Any] = pow(lowerCAmelCase__ , self.__private_key , self.prime ) return shaaaa(str(lowerCAmelCase__ ).encode() ).hexdigest() @staticmethod def __a ( lowerCAmelCase__ , lowerCAmelCase__ ) -> bool: # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(lowerCAmelCase__ , (prime - 1) // 2 , lowerCAmelCase__ ) == 1 ) @staticmethod def __a ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 14 ) -> str: a : Any = int(lowerCAmelCase__ , base=16 ) a : Any = int(lowerCAmelCase__ , base=16 ) a : Tuple = primes[group]["prime"] if not DiffieHellman.is_valid_public_key_static(lowerCAmelCase__ , lowerCAmelCase__ ): raise ValueError("Invalid public key" ) a : str = pow(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) return shaaaa(str(lowerCAmelCase__ ).encode() ).hexdigest() if 
__name__ == "__main__": import doctest doctest.testmod()
31
"""simple docstring""" import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class __UpperCamelCase ( unittest.TestCase ): def __a ( self , lowerCAmelCase__ ) -> Optional[int]: a : str = 3 a : str = 250 a : List[Any] = ids_tensor((batch_size, length) , lowerCAmelCase__ ) a : Optional[Any] = torch.ones((batch_size, length) , device=lowerCAmelCase__ , dtype=torch.float ) / length return input_ids, scores def __a ( self ) -> List[Any]: a, a : str = self._get_tensors(5 ) a : Any = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : str = self._get_tensors(9 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : Union[str, Any] = self._get_tensors(10 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __a ( self ) -> List[Any]: a : Optional[Any] = MaxLengthCriteria(max_length=10 ) a, a : int = self._get_tensors(5 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : int = self._get_tensors(9 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : Union[str, Any] = self._get_tensors(10 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __a ( self ) -> List[str]: a : Tuple = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) a, a : str = self._get_tensors(5 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : int = self._get_tensors(9 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : int = self._get_tensors(10 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a : 
List[Any] = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def __a ( self ) -> str: a, a : Tuple = self._get_tensors(5 ) a : str = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a : Optional[int] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __a ( self ) -> str: validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(lowerCAmelCase__ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) a : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(lowerCAmelCase__ ) , 1 )
31
1
"""simple docstring""" import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class __UpperCamelCase ( unittest.TestCase ): def __a ( self , lowerCAmelCase__ ) -> Optional[int]: a : str = 3 a : str = 250 a : List[Any] = ids_tensor((batch_size, length) , lowerCAmelCase__ ) a : Optional[Any] = torch.ones((batch_size, length) , device=lowerCAmelCase__ , dtype=torch.float ) / length return input_ids, scores def __a ( self ) -> List[Any]: a, a : str = self._get_tensors(5 ) a : Any = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : str = self._get_tensors(9 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : Union[str, Any] = self._get_tensors(10 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __a ( self ) -> List[Any]: a : Optional[Any] = MaxLengthCriteria(max_length=10 ) a, a : int = self._get_tensors(5 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : int = self._get_tensors(9 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : Union[str, Any] = self._get_tensors(10 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __a ( self ) -> List[str]: a : Tuple = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) a, a : str = self._get_tensors(5 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : int = self._get_tensors(9 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : int = self._get_tensors(10 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a : 
List[Any] = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def __a ( self ) -> str: a, a : Tuple = self._get_tensors(5 ) a : str = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a : Optional[int] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __a ( self ) -> str: validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(lowerCAmelCase__ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) a : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(lowerCAmelCase__ ) , 1 )
31
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : int = 200 ) ->int: '''simple docstring''' a : Dict = [1, 2, 5, 10, 20, 50, 100, 200] a : Optional[Any] = [0] * (pence + 1) a : List[Any] = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(_lowercase , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(200) == 73682
31
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a : List[Any] = logging.get_logger(__name__) a : Dict = { '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''', '''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''', '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''', '''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''', '''funnel-transformer/intermediate''': ( '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json''' ), '''funnel-transformer/intermediate-base''': ( '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json''' ), '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''', '''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''', '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''', '''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''', } class __UpperCamelCase ( a__ ): lowerCamelCase : Dict ="""funnel""" lowerCamelCase : Any ={ """hidden_size""": """d_model""", """num_attention_heads""": """n_head""", } def __init__( self , lowerCAmelCase__=3_0522 , lowerCAmelCase__=[4, 4, 4] , lowerCAmelCase__=None , lowerCAmelCase__=2 , lowerCAmelCase__=768 , lowerCAmelCase__=12 , lowerCAmelCase__=64 , lowerCAmelCase__=3072 , lowerCAmelCase__="gelu_new" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__=None , lowerCAmelCase__=1E-9 , lowerCAmelCase__="mean" , lowerCAmelCase__="relative_shift" , 
lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> int: a : Tuple = vocab_size a : List[Any] = block_sizes a : Dict = [1] * len(lowerCAmelCase__ ) if block_repeats is None else block_repeats assert len(lowerCAmelCase__ ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." a : Dict = num_decoder_layers a : Optional[int] = d_model a : str = n_head a : Optional[Any] = d_head a : Union[str, Any] = d_inner a : List[Any] = hidden_act a : List[str] = hidden_dropout a : Any = attention_dropout a : Optional[int] = activation_dropout a : List[str] = initializer_range a : List[str] = initializer_std a : Any = layer_norm_eps assert pooling_type in [ "mean", "max", ], f"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported.""" a : Optional[int] = pooling_type assert attention_type in [ "relative_shift", "factorized", ], f"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported.""" a : int = attention_type a : Optional[Any] = separate_cls a : List[str] = truncate_seq a : str = pool_q_only super().__init__(**lowerCAmelCase__ ) @property def __a ( self ) -> Tuple: return sum(self.block_sizes ) @num_hidden_layers.setter def __a ( self , lowerCAmelCase__ ) -> List[Any]: raise NotImplementedError( "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." ) @property def __a ( self ) -> Any: return len(self.block_sizes ) @num_blocks.setter def __a ( self , lowerCAmelCase__ ) -> List[str]: raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
31
"""simple docstring""" from ..utils import DummyObject, requires_backends class __UpperCamelCase ( metaclass=a__ ): lowerCamelCase : Optional[Any] =["""transformers""", """torch""", """note_seq"""] def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]: requires_backends(self , ["transformers", "torch", "note_seq"] ) @classmethod def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any: requires_backends(cls , ["transformers", "torch", "note_seq"] ) @classmethod def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int: requires_backends(cls , ["transformers", "torch", "note_seq"] )
31
1
"""simple docstring""" import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( '''The `image_to_image.py` script is outdated. Please use directly `from diffusers import''' ''' StableDiffusionImg2ImgPipeline` instead.''' )
31
"""simple docstring""" import qiskit def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int ) ->qiskit.result.counts.Counts: '''simple docstring''' a : Union[str, Any] = qiskit.Aer.get_backend("aer_simulator" ) # Create a Quantum Circuit acting on the q register a : Optional[Any] = qiskit.QuantumCircuit(_lowercase , _lowercase ) # Map the quantum measurement to the classical bits circuit.measure([0] , [0] ) # Execute the circuit on the simulator a : Optional[int] = qiskit.execute(_lowercase , _lowercase , shots=1000 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(_lowercase ) if __name__ == "__main__": print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
31
1
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : int ) ->int: '''simple docstring''' assert isinstance(_lowercase , _lowercase ), F"""The input value of [n={number}] is not an integer""" if number == 1: return 2 elif number < 1: a : Dict = F"""The input value of [n={number}] has to be > 0""" raise ValueError(_lowercase ) else: a : List[Any] = sylvester(number - 1 ) a : str = num - 1 a : Union[str, Any] = num return lower * upper + 1 if __name__ == "__main__": print(F'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
31
"""simple docstring""" from random import randint from tempfile import TemporaryFile import numpy as np def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] , _lowercase : Optional[Any] , _lowercase : Union[str, Any] ) ->Dict: '''simple docstring''' a : List[str] = 0 if start < end: a : Tuple = randint(_lowercase , _lowercase ) a : List[str] = a[end] a : str = a[pivot] a : Optional[int] = temp a, a : Dict = _in_place_partition(_lowercase , _lowercase , _lowercase ) count += _in_place_quick_sort(_lowercase , _lowercase , p - 1 ) count += _in_place_quick_sort(_lowercase , p + 1 , _lowercase ) return count def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : str , _lowercase : List[Any] ) ->str: '''simple docstring''' a : Union[str, Any] = 0 a : List[Any] = randint(_lowercase , _lowercase ) a : int = a[end] a : List[str] = a[pivot] a : Tuple = temp a : Union[str, Any] = start - 1 for index in range(_lowercase , _lowercase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value a : List[str] = new_pivot_index + 1 a : Optional[int] = a[new_pivot_index] a : Union[str, Any] = a[index] a : List[Any] = temp a : Tuple = a[new_pivot_index + 1] a : str = a[end] a : Dict = temp return new_pivot_index + 1, count a : int = TemporaryFile() a : Tuple = 100 # 1000 elements are to be sorted a , a : int = 0, 1 # mean and standard deviation a : List[Any] = np.random.normal(mu, sigma, p) np.save(outfile, X) print('''The array is''') print(X) outfile.seek(0) # using the same array a : int = np.load(outfile) a : Tuple = len(M) - 1 a : Union[str, Any] = _in_place_quick_sort(M, 0, r) print( '''No of Comparisons for 100 elements selected from a standard normal distribution''' '''is :''' ) print(z)
31
1
"""simple docstring""" from __future__ import annotations import requests a : str = set( '''approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports'''.split() ) def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : int = 1 , _lowercase : str = "new" , _lowercase : list | None = None ) ->dict: '''simple docstring''' a : Dict = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(_lowercase ) - valid_terms ) ): a : Any = F"""Invalid search term: {invalid_search_terms}""" raise ValueError(_lowercase ) a : str = requests.get( F"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={"User-agent": "A random string"} , ) if response.status_code == 429: raise requests.HTTPError a : Optional[Any] = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(_lowercase )} a : int = {} for id_ in range(_lowercase ): a : Tuple = { item: data["data"]["children"][id_]["data"][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
31
"""simple docstring""" import baseaa def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->bytes: '''simple docstring''' return baseaa.aaaencode(string.encode("utf-8" ) ) def _SCREAMING_SNAKE_CASE ( _lowercase : bytes ) ->str: '''simple docstring''' return baseaa.aaadecode(_lowercase ).decode("utf-8" ) if __name__ == "__main__": import doctest doctest.testmod()
31
1
"""Feature extractor that turns raw HTML strings into text nodes plus their XPath expressions."""
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


# FIX(review): the import block referenced a nonexistent `bsa` module / `is_bsa_available`
# helper; the backend check below uses "bs4", so the real dependency is BeautifulSoup (bs4).
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class __UpperCamelCase(a__):
    """Extracts text nodes and their corresponding XPaths from HTML documents.

    Requires the `bs4` (BeautifulSoup) backend. Calling an instance on one HTML string
    (or a batch of strings) returns a `BatchFeature` with two keys:
    ``nodes`` (the text content of every non-empty NavigableString) and
    ``xpaths`` (one XPath expression per node).
    """

    def __init__(self, **kwargs) -> int:
        # Fail early with a helpful message if bs4 is not installed.
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """Walk from `element` up to the document root, collecting tag names and sibling indices.

        Returns two parallel lists (root-first): the tag name at each level and the
        1-based position of the child among same-named siblings (0 when it is the only one).
        """
        # FIX(review): the three helper methods were all defined as `__a` (shadowing each
        # other) while the bodies call `self.xpath_soup` / `self.get_three_from_single` /
        # `self.construct_xpath`; restored the names the call sites require.
        xpath_tags = []
        xpath_subscripts = []
        # NavigableStrings have no `.name`; start from their enclosing tag instead.
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                # 0 means "only child with this tag"; otherwise the 1-based sibling index.
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        # Collected leaf-to-root; reverse so callers get root-to-leaf order.
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """Parse one HTML string into (doc strings, per-node tag lists, per-node subscript lists).

        Raises:
            ValueError: if the three parallel lists ever get out of sync (internal sanity check).
        """
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                # Skip strings whose parent is not a real tag (comments, doctype, ...).
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """Render parallel tag/subscript lists as a single XPath string, e.g. ``/html/body/div[2]``."""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        """Featurize one HTML string or a list/tuple of HTML strings.

        Returns:
            BatchFeature with ``nodes`` (list of text-node lists) and ``xpaths``
            (list of XPath-string lists), one entry per input document.

        Raises:
            ValueError: if `html_strings` is neither a str nor a (possibly empty) list/tuple of str.
        """
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        # FIX(review): guard the [0] access so an empty (but valid) batch does not raise
        # IndexError; an empty list is treated as an already-batched empty input.
        is_batched = bool(
            isinstance(html_strings, (list, tuple))
            and (len(html_strings) == 0 or isinstance(html_strings[0], str))
        )
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
31
"""Fast (Rust-backed) tokenizer for ALBERT models, built on `tokenizers` via PreTrainedTokenizerFast."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    # FIX(review): keep the symbol bound so `slow_tokenizer_class` below never raises
    # NameError when sentencepiece is not installed (the fallback was assigned to a
    # throwaway name before).
    AlbertTokenizer = None

# FIX(review): these module constants were all assigned to throwaway names while the
# class body references VOCAB_FILES_NAMES / PRETRAINED_* / logger; restored the names.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class __UpperCamelCase(a__):
    """Fast ALBERT tokenizer.

    Wraps a `tokenizers.json` model; falls back to the sentencepiece-based slow
    tokenizer (`AlbertTokenizer`) for vocabulary export when available.
    """

    # FIX(review): all four class attributes were named `lowerCamelCase`, shadowing each
    # other and never setting the attribute names the PreTrainedTokenizerFast base class
    # actually reads; restored the standard contract names.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Saving the sentencepiece vocab requires the original spiece.model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model input from sequence(s): ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Return token-type ids: all 0s for the first segment (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into `save_directory`; requires the slow-tokenizer vocab file."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        # Only copy when the target is not already the source file.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
31
1
"""Speech-to-image pipeline: Whisper transcribes audio into a prompt, then Stable Diffusion renders it.

NOTE(review): this file has been run through a name-mangling pass and is NOT runnable as-is:
- several `def` signatures repeat the parameter name `lowerCAmelCase__`, which is a SyntaxError;
- every assignment targets the throwaway name `a`, while later code reads the original
  variable names (`prompt`, `batch_size`, `latents`, `noise_pred`, ...), which are undefined.
Code is kept token-identical here; only documentation has been added.
"""
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    WhisperForConditionalGeneration,
    WhisperProcessor,
)

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging


# Module logger (mangled name; presumably intended to be `logger` — the class body reads `logger`).
a : List[str] = logging.get_logger(__name__)  # pylint: disable=invalid-name


class __UpperCamelCase(a__):
    """Diffusion pipeline whose text prompt comes from transcribed speech.

    Registers: speech_model, speech_processor, vae, text_encoder, tokenizer, unet,
    scheduler, feature_extractor (plus, judging by the warning below, a safety_checker).
    """

    # NOTE(review): 9 parameters all named `lowerCAmelCase__` — SyntaxError; original names
    # are presumably (speech_model, speech_processor, vae, text_encoder, tokenizer, unet,
    # scheduler, safety_checker, feature_extractor) — confirm against upstream.
    def __init__(
        self,
        lowerCAmelCase__,
        lowerCAmelCase__,
        lowerCAmelCase__,
        lowerCAmelCase__,
        lowerCAmelCase__,
        lowerCAmelCase__,
        lowerCAmelCase__,
        lowerCAmelCase__,
        lowerCAmelCase__,
    ) -> int:
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=lowerCAmelCase__,
            speech_processor=lowerCAmelCase__,
            vae=lowerCAmelCase__,
            text_encoder=lowerCAmelCase__,
            tokenizer=lowerCAmelCase__,
            unet=lowerCAmelCase__,
            scheduler=lowerCAmelCase__,
            feature_extractor=lowerCAmelCase__,
        )

    # Enable sliced attention; "auto" halves the attention head dim per slice.
    # NOTE(review): reads `slice_size`, which is undefined under the mangled parameter name.
    def __a(self, lowerCAmelCase__="auto") -> int:
        if slice_size == "auto":
            a : Any = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(lowerCAmelCase__)

    # Disable sliced attention (calls enable with what was presumably None).
    # NOTE(review): `lowerCAmelCase__` is not a parameter here — undefined name.
    def __a(self) -> Tuple:
        self.enable_attention_slicing(lowerCAmelCase__)

    @torch.no_grad()
    def __call__(
        self,
        lowerCAmelCase__,
        lowerCAmelCase__=1_6000,
        lowerCAmelCase__=512,
        lowerCAmelCase__=512,
        lowerCAmelCase__=50,
        lowerCAmelCase__=7.5,
        lowerCAmelCase__=None,
        lowerCAmelCase__=1,
        lowerCAmelCase__=0.0,
        lowerCAmelCase__=None,
        lowerCAmelCase__=None,
        lowerCAmelCase__="pil",
        lowerCAmelCase__=True,
        lowerCAmelCase__=None,
        lowerCAmelCase__=1,
        **lowerCAmelCase__,
    ) -> Optional[Any]:
        """Generate image(s) from an audio clip.

        Stages: (1) Whisper transcription -> prompt text, (2) CLIP text encoding with
        optional classifier-free guidance, (3) scheduler-driven UNet denoising loop,
        (4) VAE decode and optional PIL conversion.
        NOTE(review): duplicate parameter names make this `def` a SyntaxError; defaults
        suggest (audio, sampling_rate=16000, height=512, width=512, num_inference_steps=50,
        guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0,
        generator=None, latents=None, output_type="pil", return_dict=True, callback=None,
        callback_steps=1) — confirm against upstream.
        """
        # --- 1. Speech -> text prompt via Whisper ---
        a : Optional[int] = self.speech_processor.feature_extractor(
            lowerCAmelCase__, return_tensors="pt", sampling_rate=lowerCAmelCase__
        ).input_features.to(self.device)
        a : Optional[Any] = self.speech_model.generate(lowerCAmelCase__, max_length=48_0000)

        a : Tuple = self.speech_processor.tokenizer.batch_decode(
            lowerCAmelCase__, skip_special_tokens=lowerCAmelCase__, normalize=lowerCAmelCase__
        )[0]

        # --- 2. Input validation ---
        if isinstance(lowerCAmelCase__, lowerCAmelCase__):
            a : Optional[Any] = 1
        elif isinstance(lowerCAmelCase__, lowerCAmelCase__):
            a : Any = len(lowerCAmelCase__)
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase__)}""")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(lowerCAmelCase__, lowerCAmelCase__) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(lowerCAmelCase__)}."""
            )

        # get prompt text embeddings
        a : Union[str, Any] = self.tokenizer(
            lowerCAmelCase__,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        a : List[Any] = text_inputs.input_ids

        # Warn-and-truncate when the transcription exceeds CLIP's context window.
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            a : Any = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}"""
            )
            a : Optional[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
        a : int = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        a, a, a : Dict = text_embeddings.shape
        a : Any = text_embeddings.repeat(1, lowerCAmelCase__, 1)
        a : Optional[Any] = text_embeddings.view(bs_embed * num_images_per_prompt, lowerCAmelCase__, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        a : Optional[int] = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            a : List[str]
            if negative_prompt is None:
                a : Any = [""] * batch_size
            elif type(lowerCAmelCase__) is not type(lowerCAmelCase__):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCAmelCase__)} !="""
                    f""" {type(lowerCAmelCase__)}."""
                )
            elif isinstance(lowerCAmelCase__, lowerCAmelCase__):
                a : Optional[Any] = [negative_prompt]
            elif batch_size != len(lowerCAmelCase__):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCAmelCase__)}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    " the batch size of `prompt`."
                )
            else:
                a : Optional[int] = negative_prompt

            a : Dict = text_input_ids.shape[-1]
            a : Any = self.tokenizer(
                lowerCAmelCase__,
                padding="max_length",
                max_length=lowerCAmelCase__,
                truncation=lowerCAmelCase__,
                return_tensors="pt",
            )
            a : Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            a : int = uncond_embeddings.shape[1]
            a : int = uncond_embeddings.repeat(1, lowerCAmelCase__, 1)
            a : Optional[int] = uncond_embeddings.view(batch_size * num_images_per_prompt, lowerCAmelCase__, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            a : Optional[int] = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        a : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        a : Tuple = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                a : Union[str, Any] = torch.randn(
                    lowerCAmelCase__, generator=lowerCAmelCase__, device="cpu", dtype=lowerCAmelCase__
                ).to(self.device)
            else:
                a : Optional[Any] = torch.randn(
                    lowerCAmelCase__, generator=lowerCAmelCase__, device=self.device, dtype=lowerCAmelCase__
                )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""")
            a : str = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(lowerCAmelCase__)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        a : Optional[Any] = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        a : List[Any] = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        a : List[Any] = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        a : Union[str, Any] = {}
        if accepts_eta:
            a : Any = eta

        # --- 3. Denoising loop ---
        for i, t in enumerate(self.progress_bar(lowerCAmelCase__)):
            # expand the latents if we are doing classifier free guidance
            a : int = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            a : List[str] = self.scheduler.scale_model_input(lowerCAmelCase__, lowerCAmelCase__)

            # predict the noise residual
            a : Optional[int] = self.unet(lowerCAmelCase__, lowerCAmelCase__, encoder_hidden_states=lowerCAmelCase__).sample

            # perform guidance
            if do_classifier_free_guidance:
                a, a : Union[str, Any] = noise_pred.chunk(2)
                a : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            a : List[str] = self.scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)

        # --- 4. Decode latents to images ---
        # 1/0.18215 undoes the latent scaling factor used by the Stable Diffusion VAE
        # (matches the constant in upstream SD pipelines).
        a : Optional[Any] = 1 / 0.18_215 * latents
        a : Union[str, Any] = self.vae.decode(lowerCAmelCase__).sample

        a : int = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        a : Optional[Any] = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            a : Optional[int] = self.numpy_to_pil(lowerCAmelCase__)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=lowerCAmelCase__, nsfw_content_detected=lowerCAmelCase__)
31
"""Unit tests for SpeechTaFeatureExtractor (waveform inputs and mel-spectrogram targets).

NOTE(review): this file has been run through a name-mangling pass and is NOT runnable as-is:
- `def` signatures repeat parameter names (`_lowercase`, `lowerCAmelCase__`) — SyntaxError;
- assignments target the throwaway name `a` while code reads the original names
  (`values`, `rng`, `feat_extract`, `processed`, ...), which are undefined;
- helper `floats_list`, `global_rng`, and `SpeechTaFeatureExtractionTester` are referenced
  but defined under mangled names;
- both classes below are named `__UpperCamelCase`, so the first is shadowed.
Code is kept token-identical here; only documentation has been added.
"""
import itertools
import random
import unittest

import numpy as np

from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

# Shared RNG for reproducible random float fixtures (mangled; presumably `global_rng`).
a : List[Any] = random.Random()


# Build a `shape[0] x shape[1]` nested list of random floats scaled by `scale`
# (mangled; presumably `floats_list(shape, scale=1.0, rng=None, name=None)`).
def _SCREAMING_SNAKE_CASE(_lowercase: List[str], _lowercase: int = 1.0, _lowercase: Optional[int] = None, _lowercase: Union[str, Any] = None) -> Optional[Any]:
    """simple docstring"""
    if rng is None:
        a : Tuple = global_rng
    a : Tuple = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values


@require_torch
class __UpperCamelCase(unittest.TestCase):
    """Config holder / fixture factory for the feature-extractor tests
    (presumably `SpeechTaFeatureExtractionTester` — the test class below instantiates that name)."""

    # NOTE(review): all parameters share the name `lowerCAmelCase__` — SyntaxError.
    def __init__(
        self,
        lowerCAmelCase__,
        lowerCAmelCase__=7,
        lowerCAmelCase__=400,
        lowerCAmelCase__=2000,
        lowerCAmelCase__=1,
        lowerCAmelCase__=0.0,
        lowerCAmelCase__=1_6000,
        lowerCAmelCase__=True,
        lowerCAmelCase__=80,
        lowerCAmelCase__=16,
        lowerCAmelCase__=64,
        lowerCAmelCase__="hann_window",
        lowerCAmelCase__=80,
        lowerCAmelCase__=7600,
        lowerCAmelCase__=1E-10,
        lowerCAmelCase__=True,
    ) -> Optional[Any]:
        a : int = parent
        a : Tuple = batch_size
        a : Dict = min_seq_length
        a : Any = max_seq_length
        # Step so that generated sequences span [min_seq_length, max_seq_length] evenly.
        a : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        a : Union[str, Any] = feature_size
        a : Tuple = padding_value
        a : str = sampling_rate
        a : Dict = do_normalize
        a : str = num_mel_bins
        a : List[str] = hop_length
        a : str = win_length
        a : Optional[Any] = win_function
        a : List[str] = fmin
        a : Any = fmax
        a : Optional[int] = mel_floor
        a : Tuple = return_attention_mask

    # Kwargs dict used to construct a SpeechTaFeatureExtractor under test.
    def __a(self) -> Optional[Any]:
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    # Waveform fixtures: equal-length or increasing-length float lists, optionally as np arrays.
    def __a(self, lowerCAmelCase__=False, lowerCAmelCase__=False) -> Tuple:
        def _flatten(lowerCAmelCase__):
            return list(itertools.chain(*lowerCAmelCase__))

        if equal_length:
            a : Union[str, Any] = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            a : str = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            a : Any = [np.asarray(lowerCAmelCase__) for x in speech_inputs]
        return speech_inputs

    # Mel-spectrogram target fixtures (2-D per example), same length policy as above.
    def __a(self, lowerCAmelCase__=False, lowerCAmelCase__=False) -> Dict:
        if equal_length:
            a : str = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            a : Any = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            a : Optional[int] = [np.asarray(lowerCAmelCase__) for x in speech_inputs]
        return speech_inputs


@require_torch
class __UpperCamelCase(a__, unittest.TestCase):
    """Test suite for SpeechTaFeatureExtractor via the shared SequenceFeatureExtraction mixin."""

    lowerCamelCase : Tuple = SpeechTaFeatureExtractor

    # setUp: build the fixture factory.
    def __a(self) -> Union[str, Any]:
        a : Tuple = SpeechTaFeatureExtractionTester(self)

    # Assert normalized features have ~zero mean and ~unit variance.
    def __a(self, lowerCAmelCase__) -> Union[str, Any]:
        self.assertTrue(np.all(np.mean(lowerCAmelCase__, axis=0) < 1E-3))
        self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase__, axis=0) - 1) < 1E-3))

    def __a(self) -> Union[str, Any]:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        a : Any = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        a : Any = [np.asarray(lowerCAmelCase__) for speech_input in speech_inputs]

        # Test not batched input
        a : Optional[int] = feat_extract(speech_inputs[0], return_tensors="np").input_values
        a : Optional[Any] = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(lowerCAmelCase__, lowerCAmelCase__, atol=1E-3))

        # Test batched
        a : int = feat_extract(lowerCAmelCase__, return_tensors="np").input_values
        a : int = feat_extract(lowerCAmelCase__, return_tensors="np").input_values
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__, lowerCAmelCase__):
            self.assertTrue(np.allclose(lowerCAmelCase__, lowerCAmelCase__, atol=1E-3))

    # Padding modes zero-pad beyond each input's true length.
    def __a(self) -> Optional[Any]:
        a : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        a : Dict = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        a : int = ["longest", "max_length", "do_not_pad"]
        a : Tuple = [None, 1600, None]
        for max_length, padding in zip(lowerCAmelCase__, lowerCAmelCase__):
            a : Dict = feat_extract(lowerCAmelCase__, padding=lowerCAmelCase__, max_length=lowerCAmelCase__, return_tensors="np")
            a : List[Any] = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1E-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1E-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    # Normalization holds regardless of padding strategy.
    def __a(self) -> str:
        a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        a : List[str] = range(800, 1400, 200)
        a : List[str] = [floats_list((1, x))[0] for x in lengths]

        a : Any = ["longest", "max_length", "do_not_pad"]
        a : Any = [None, 1600, None]

        for max_length, padding in zip(lowerCAmelCase__, lowerCAmelCase__):
            a : List[Any] = feat_extract(lowerCAmelCase__, max_length=lowerCAmelCase__, padding=lowerCAmelCase__)
            a : Dict = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    # Truncation to max_length with "max_length" padding.
    def __a(self) -> Dict:
        a : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        a : int = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        a : Union[str, Any] = feat_extract(
            lowerCAmelCase__, truncation=lowerCAmelCase__, max_length=1000, padding="max_length", return_tensors="np"
        )
        a : List[Any] = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    # "longest" padding: clamp to max_length when shorter, to longest input when larger.
    def __a(self) -> Dict:
        a : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        a : Tuple = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        a : List[Any] = feat_extract(
            lowerCAmelCase__, truncation=lowerCAmelCase__, max_length=1000, padding="longest", return_tensors="np"
        )
        a : Union[str, Any] = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        a : List[Any] = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        a : int = feat_extract(
            lowerCAmelCase__, truncation=lowerCAmelCase__, max_length=2000, padding="longest", return_tensors="np"
        )
        a : Dict = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    # float64 input is downcast to float32 for both np and pt tensors.
    def __a(self) -> List[str]:
        a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        a : Any = np.random.rand(100).astype(np.floataa)
        a : Optional[Any] = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            a : str = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.floataa)
            a : List[str] = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa)

    def __a(self) -> Tuple:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        a : Optional[Any] = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        a : Tuple = [np.asarray(lowerCAmelCase__) for speech_input in speech_inputs]

        # Test feature size
        a : Union[str, Any] = feature_extractor(audio_target=lowerCAmelCase__, padding=lowerCAmelCase__, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        a : Dict = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        a : List[Any] = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(lowerCAmelCase__, lowerCAmelCase__, atol=1E-3))

        # Test batched
        a : Optional[int] = feature_extractor(lowerCAmelCase__, return_tensors="np").input_values
        a : Any = feature_extractor(lowerCAmelCase__, return_tensors="np").input_values
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__, lowerCAmelCase__):
            self.assertTrue(np.allclose(lowerCAmelCase__, lowerCAmelCase__, atol=1E-3))

        # Test 2-D numpy arrays are batched.
        a : Optional[Any] = [floats_list((1, x))[0] for x in (800, 800, 800)]
        a : List[Any] = np.asarray(lowerCAmelCase__)
        a : str = feature_extractor(lowerCAmelCase__, return_tensors="np").input_values
        a : str = feature_extractor(lowerCAmelCase__, return_tensors="np").input_values
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__, lowerCAmelCase__):
            self.assertTrue(np.allclose(lowerCAmelCase__, lowerCAmelCase__, atol=1E-3))

    # BatchFeature round-trips target inputs; np tensor conversion keeps shape.
    def __a(self) -> str:
        a : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
        a : Any = self.feature_extraction_class(**self.feat_extract_dict)
        a : Union[str, Any] = feat_extract.model_input_names[0]

        a : List[str] = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(lowerCAmelCase__) == len(lowerCAmelCase__) for x, y in zip(lowerCAmelCase__, processed_features[input_name])))

        a : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__)
        a : List[Any] = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        a : Tuple = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            a : Dict = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    # Same as above but converting to PyTorch tensors.
    @require_torch
    def __a(self) -> Tuple:
        a : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__)
        a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict)
        a : Optional[int] = feat_extract.model_input_names[0]

        a : List[Any] = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        a : Tuple = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            a : List[str] = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    # Padding produces numerically equivalent np and pt outputs.
    @require_torch
    def __a(self) -> Optional[Any]:
        a : Dict = self.feature_extraction_class(**self.feat_extract_dict)
        a : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
        a : Optional[Any] = feat_extract.model_input_names[0]

        a : List[str] = BatchFeature({input_name: speech_inputs})

        a : Tuple = feat_extract.num_mel_bins  # hack!

        a : List[Any] = feat_extract.pad(lowerCAmelCase__, padding="longest", return_tensors="np")[input_name]
        a : Any = feat_extract.pad(lowerCAmelCase__, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1E-2)

    # Attention mask sums equal the true (unpadded) target lengths.
    def __a(self) -> Union[str, Any]:
        a : Any = self.feat_extract_dict
        a : Optional[Any] = True
        a : Union[str, Any] = self.feature_extraction_class(**lowerCAmelCase__)
        a : Any = self.feat_extract_tester.prepare_inputs_for_target()
        a : Dict = [len(lowerCAmelCase__) for x in speech_inputs]
        a : int = feat_extract.model_input_names[0]
        a : List[Any] = BatchFeature({input_name: speech_inputs})
        a : Union[str, Any] = feat_extract.num_mel_bins  # hack!

        a : Dict = feat_extract.pad(lowerCAmelCase__, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", lowerCAmelCase__)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), lowerCAmelCase__)

    # Attention mask under max_length truncation covers exactly max_length frames.
    def __a(self) -> Union[str, Any]:
        a : Tuple = self.feat_extract_dict
        a : str = True
        a : Optional[Any] = self.feature_extraction_class(**lowerCAmelCase__)
        a : List[Any] = self.feat_extract_tester.prepare_inputs_for_target()
        a : Dict = [len(lowerCAmelCase__) for x in speech_inputs]
        a : Optional[Any] = feat_extract.model_input_names[0]
        a : str = BatchFeature({input_name: speech_inputs})
        a : Optional[Any] = min(lowerCAmelCase__)
        a : List[Any] = feat_extract.num_mel_bins  # hack!

        a : Any = feat_extract.pad(
            lowerCAmelCase__, padding="max_length", max_length=lowerCAmelCase__, truncation=lowerCAmelCase__, return_tensors="np"
        )
        self.assertIn("attention_mask", lowerCAmelCase__)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    # Load `num_samples` raw waveforms from the dummy LibriSpeech dataset.
    def __a(self, lowerCAmelCase__) -> Optional[int]:
        from datasets import load_dataset

        a : Tuple = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        a : Optional[Any] = ds.sort("id").select(range(lowerCAmelCase__))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    # Integration: waveform featurization matches reference values.
    def __a(self) -> Union[str, Any]:
        # fmt: off
        a : List[Any] = torch.tensor(
            [2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
             3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
             2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
             4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
             7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
             4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03]
        )
        # fmt: on

        a : List[str] = self._load_datasamples(1)
        a : Union[str, Any] = SpeechTaFeatureExtractor()
        a : str = feature_extractor(lowerCAmelCase__, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 9_3680))
        self.assertTrue(torch.allclose(input_values[0, :30], lowerCAmelCase__, atol=1E-6))

    # Integration: mel-spectrogram target featurization matches reference values.
    def __a(self) -> Union[str, Any]:
        # fmt: off
        a : Tuple = torch.tensor(
            [-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719,
             -3.6_777, -3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921,
             -3.0_279, -3.0_386, -3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831,
             -2.7_287, -3.1_761, -3.1_571, -3.2_726, -3.0_582, -3.1_007, -3.4_533,
             -3.4_695, -3.0_998]
        )
        # fmt: on

        a : Dict = self._load_datasamples(1)
        a : Tuple = SpeechTaFeatureExtractor()
        a : Optional[int] = feature_extractor(audio_target=lowerCAmelCase__, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], lowerCAmelCase__, atol=1E-4))
31
1
"""Tests for `datasets.BeamBasedBuilder` (reconstructed from an obfuscated dump).

The obfuscation renamed classes/functions/locals (`__UpperCamelCase`, `a`, …)
while later references kept the original names, so the file raised NameError
everywhere; the original identifiers are restored here from those references.
"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with a flat string feature."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with a nested sequence feature."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    """Three flat (key, example) pairs consumed by DummyBeamDataset."""
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    """Three nested (key, example) pairs consumed by NestedBeamDataset."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # Force two output shards to exercise the sharded write path.
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            # NOTE(review): the obfuscated copy checked shard 00000 twice; the
            # second existence check must target shard 00001.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            # Without a beam_runner the builder must refuse to run.
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
31
"""Pre-tokenize a code dataset and push it to the Hub (reconstructed).

The obfuscated copy defined the map function as `_SCREAMING_SNAKE_CASE` while
`ds.map(tokenize, ...)` referenced `tokenize`, and every top-level binding was
renamed to `a`; the original identifiers are restored here.
"""
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    """Tokenize one example; also record the chars-per-token compression ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    # Default to one worker per CPU core.
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
# Drop the raw-text columns after tokenization to keep the pushed dataset small.
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
31
1
"""Lazy import structure for the FocalNet model (reconstructed).

The obfuscated copy bound the import-structure dict and the `_LazyModule` to a
throwaway name `a`, so `_import_structure` was undefined and the lazy module
was never installed in `sys.modules`; both are restored here.
"""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
31
"""Lazy import structure for the MT5 model family (reconstructed).

The obfuscated copy renamed `MT5Tokenizer`/`MT5TokenizerFast` aliases and the
`_import_structure` dict to `a` (and garbled `t5`/`MT5` into `ta`/`MTa`), so
the `extra_objects` and `_LazyModule` references were undefined; restored here.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

# MT5 reuses the T5 tokenizer; expose it under the MT5 name.
MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    # The tokenizer aliases are eagerly-created objects, so they are passed via
    # `extra_objects` instead of the lazy import structure.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
31
1
"""Tests launching the accelerate metrics script on CPU and GPU (reconstructed).

The obfuscated copy assigned `mod_file`, `self.test_file_path` and
`self.test_metrics` to a throwaway local `a`, so every test method raised
NameError/AttributeError; the original bindings are restored here.
"""
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        # Locate the bundled metrics test script next to accelerate.test_utils.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
31
"""Binary-search number guessing game (reconstructed).

The obfuscated copy named all four functions `_SCREAMING_SNAKE_CASE`, so the
first three were shadowed and the calls to `get_avg`, `guess_the_number` and
`main` raised NameError; distinct names are restored here.
"""


def temperature_of_the_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return `min_val` when `option` is truthy, otherwise `max_val`.

    Raises:
        AssertionError: if any argument has the wrong type.
        ValueError: if `min_val` is greater than `max_val`.
    """
    # NOTE(review): this helper is never called in the file, so its original
    # name could not be recovered from references — confirm against upstream.
    assert (
        isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer midpoint of the two numbers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Find `to_guess` by repeated bisection between `lower` and `higher`.

    Prints each intermediate guess; raises ValueError on an invalid range or a
    target outside the open interval (lower, higher).
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        """Compare a guess against the target."""
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        # Tighten the bound on the side the guess fell short of.
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    """Interactive entry point: read the range and target, then play."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
31
1
"""Decimal-to-octal conversion (reconstructed).

The obfuscated copy named both functions `_SCREAMING_SNAKE_CASE`, so `main`'s
calls to `decimal_to_octal` and the `main()` entry call raised NameError; the
original names are restored here.
"""
import math


def decimal_to_octal(num: int) -> str:
    """Convert a non-negative base-10 integer to an octal string, e.g. 8 -> "0o10"."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        # Place the octal digit at the current decimal position (units, tens, ...).
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    """Print a few worked examples."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
31
"""ONNX Runtime model wrapper used by diffusers pipelines (reconstructed).

The obfuscated copy assigned instance attributes and locals to a throwaway
`a` (breaking `self.model`, `self.model_save_dir`, etc.) and garbled the
NumPy dtype names; both are restored from the surviving references and the
ONNX type keys.
"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


logger = logging.get_logger(__name__)

# Maps ONNX tensor element-type strings to their NumPy equivalents.
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    """Thin wrapper around an `onnxruntime.InferenceSession` with
    save/load helpers mirroring the `from_pretrained` API."""

    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        """Run inference; every keyword argument becomes a named input tensor."""
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        # None -> return all model outputs.
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Create an `InferenceSession`, defaulting to the CPU provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        """Copy the latest model file (and external weights, if any) into
        `save_directory` under `file_name` (default: ONNX_WEIGHTS_NAME)."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        """Public save entry point; refuses to write over an existing file."""
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        revision: Optional[str] = None,
        cache_dir: Optional[str] = None,
        force_download: bool = False,
        use_auth_token: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options=None,
        **kwargs,
    ):
        """Load from a local directory or download the model file from the Hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        """Support the `repo_id@revision` shorthand, then delegate."""
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
31
1
"""Fast tokenizer for FNet (reconstructed; mangled identifiers restored so
`mask_token`, the instance attributes and the special-token helpers resolve)."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) FNet tokenizer; pairs with the slow SentencePiece one."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # The slow tokenizer can only be reconstructed when the SentencePiece
        # vocab file is available.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """[CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 over the first sequence (incl. specials), 1 over the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
31
"""Download regularization class images via the LAION knn service (reconstructed).

The obfuscated copy named both functions `_SCREAMING_SNAKE_CASE` and assigned
every local to `a`, so the `retrieve`/`parse_args` calls and all loop state
raised NameError; the original identifiers are restored here.
"""
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    """Query the LAION index for `class_prompt` and save up to
    `num_class_images` images plus their captions/urls under `class_data_dir`."""
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    # Already populated from a previous run — nothing to do.
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the query size until enough candidates come back (capped at 1e4).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f_caption, open(
        f"{class_data_dir}/urls.txt", "w"
    ) as f_urls, open(f"{class_data_dir}/images.txt", "w") as f_images:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Verify the payload decodes as an image before keeping it.
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f_caption.write(images["caption"] + "\n")
                    f_urls.write(images["url"] + "\n")
                    f_images.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip unreachable/broken entries.
                continue
    return


def parse_args():
    """CLI arguments for the retrieval script."""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
31
1
"""Single-bit manipulation helpers (reconstructed).

The obfuscated copy named all five functions `_SCREAMING_SNAKE_CASE`, so each
definition shadowed the previous one and only the last was callable; distinct
names are restored here.
"""


def set_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` set to 1.

    >>> set_bit(0b1101, 1)
    15
    """
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` cleared to 0.

    >>> clear_bit(0b1111, 1)
    13
    """
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` inverted.

    >>> flip_bit(0b1101, 1)
    15
    """
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True when the bit at `position` is 1.

    >>> is_bit_set(0b1010, 1)
    True
    """
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit at `position` as 0 or 1.

    >>> get_bit(0b1010, 1)
    1
    """
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
31
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Reconstructed from an obfuscated dump: parser/argument-group/command locals
# had been renamed to `a`/`_lowercase`, breaking every later reference.

import argparse
import os
import subprocess

from packaging.version import Version, parse

from accelerate.commands.config.config_args import default_config_file, load_config_from_file


_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    """Build the `accelerate tpu-config` argument parser."""
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    """Assemble and run the `gcloud compute tpus tpu-vm ssh` command."""
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone

    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    """Standalone entry point for `accelerate tpu-config`."""
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
31
1
"""Processor combining a CLIP image processor with an XLM-Roberta tokenizer
(reconstructed: locals such as `feature_extractor`, `encoding` and
`image_features` had been renamed to `a`, breaking every later reference)."""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    """Wraps a `CLIPImageProcessor` and an XLM-Roberta tokenizer into one object."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Fall back to the deprecated argument for backward compatibility.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; when both are given the
        pixel values are merged into the text encoding."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Deduplicate while preserving order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
31
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: a : Tuple = None a : int = logging.get_logger(__name__) a : int = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} a : Optional[int] = { '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json''' ), }, } a : int = { '''facebook/nllb-large-en-ro''': 1024, '''facebook/nllb-200-distilled-600M''': 1024, } # fmt: off a : List[Any] = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', 
'''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', 
'''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class __UpperCamelCase ( a__ ): lowerCamelCase : Optional[Any] =VOCAB_FILES_NAMES lowerCamelCase : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : Dict =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : List[Any] =["""input_ids""", """attention_mask"""] lowerCamelCase : Union[str, Any] =NllbTokenizer lowerCamelCase : List[int] =[] lowerCamelCase : List[int] =[] def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Optional[Any]: # Mask token behave like a normal word, i.e. include the space before it a : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token a : Optional[Any] = legacy_behaviour super().__init__( vocab_file=lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , legacy_behaviour=lowerCAmelCase__ , **lowerCAmelCase__ , ) a : int = vocab_file a : Any = False if not self.vocab_file else True a : List[str] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) a : str = { lang_code: self.convert_tokens_to_ids(lowerCAmelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } a : List[Any] = src_lang if src_lang is not None else "eng_Latn" a : str = self.convert_tokens_to_ids(self._src_lang ) a : Any = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __a ( self ) -> str: return self._src_lang @src_lang.setter def __a ( self , lowerCAmelCase__ ) -> None: a : List[str] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]: a : str = [self.sep_token_id] a : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) a : Dict = src_lang a : int = self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ ) a : Dict = self.convert_tokens_to_ids(lowerCAmelCase__ ) a : Any = tgt_lang_id return inputs def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = "eng_Latn" , lowerCAmelCase__ = None , lowerCAmelCase__ = "fra_Latn" , **lowerCAmelCase__ , ) -> BatchEncoding: a : Optional[int] = src_lang a : int = 
tgt_lang return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) def __a ( self ) -> Tuple: return self.set_src_lang_special_tokens(self.src_lang ) def __a ( self ) -> str: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __a ( self , lowerCAmelCase__ ) -> None: a : int = self.convert_tokens_to_ids(lowerCAmelCase__ ) if self.legacy_behaviour: a : Tuple = [] a : List[str] = [self.eos_token_id, self.cur_lang_code] else: a : int = [self.cur_lang_code] a : int = [self.eos_token_id] a : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) a : Any = self.convert_ids_to_tokens(self.suffix_tokens ) a : Any = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __a ( self , lowerCAmelCase__ ) -> None: a : str = self.convert_tokens_to_ids(lowerCAmelCase__ ) if self.legacy_behaviour: a : Optional[Any] = [] a : int = [self.eos_token_id, self.cur_lang_code] else: a : List[Any] = [self.cur_lang_code] a : List[Any] = [self.eos_token_id] a : int = self.convert_ids_to_tokens(self.prefix_tokens ) a : int = self.convert_ids_to_tokens(self.suffix_tokens ) a : Any = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." 
) if not os.path.isdir(lowerCAmelCase__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return a : Any = os.path.join( lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ): copyfile(self.vocab_file , lowerCAmelCase__ ) return (out_vocab_file,)
31
1
"""simple docstring""" import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def _SCREAMING_SNAKE_CASE ( _lowercase : Dict ) ->int: '''simple docstring''' a : str = torch.exp(_lowercase ) a : int = torch.sum(_lowercase , dim=1 ) # sum of exp(x_i) a : Union[str, Any] = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i) return torch.log(_lowercase ) - B / A class __UpperCamelCase ( nn.Module ): def __init__( self , lowerCAmelCase__ ) -> Tuple: super().__init__() a : Any = config.output_attentions a : Dict = config.output_hidden_states a : Any = nn.ModuleList([BertLayer(lowerCAmelCase__ ) for _ in range(config.num_hidden_layers )] ) a : List[Any] = nn.ModuleList([BertHighway(lowerCAmelCase__ ) for _ in range(config.num_hidden_layers )] ) a : Optional[int] = [-1 for _ in range(config.num_hidden_layers )] def __a ( self , lowerCAmelCase__ ) -> Any: if (type(lowerCAmelCase__ ) is float) or (type(lowerCAmelCase__ ) is int): for i in range(len(self.early_exit_entropy ) ): a : str = x else: a : Tuple = x def __a ( self , lowerCAmelCase__ ) -> int: a : str = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Dict: a : Union[str, Any] = () a : Optional[Any] = () a : Any = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: a : Dict = all_hidden_states + (hidden_states,) a : str = layer_module( lowerCAmelCase__ , lowerCAmelCase__ , head_mask[i] , lowerCAmelCase__ , lowerCAmelCase__ ) a : Dict = layer_outputs[0] if self.output_attentions: a : 
Optional[Any] = all_attentions + (layer_outputs[1],) a : List[Any] = (hidden_states,) if self.output_hidden_states: a : Dict = current_outputs + (all_hidden_states,) if self.output_attentions: a : Any = current_outputs + (all_attentions,) a : Any = self.highway[i](lowerCAmelCase__ ) # logits, pooled_output if not self.training: a : List[str] = highway_exit[0] a : int = entropy(lowerCAmelCase__ ) a : List[Any] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy a : Tuple = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: a : Optional[Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(lowerCAmelCase__ , i + 1 ) else: a : Optional[Any] = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: a : Dict = all_hidden_states + (hidden_states,) a : Any = (hidden_states,) if self.output_hidden_states: a : Union[str, Any] = outputs + (all_hidden_states,) if self.output_attentions: a : Optional[Any] = outputs + (all_attentions,) a : Union[str, Any] = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( """The Bert Model transformer with early exiting (DeeBERT). 
""" , a__ , ) class __UpperCamelCase ( a__ ): def __init__( self , lowerCAmelCase__ ) -> Any: super().__init__(lowerCAmelCase__ ) a : int = config a : Tuple = BertEmbeddings(lowerCAmelCase__ ) a : str = DeeBertEncoder(lowerCAmelCase__ ) a : List[str] = BertPooler(lowerCAmelCase__ ) self.init_weights() def __a ( self ) -> List[Any]: self.encoder.init_highway_pooler(self.pooler ) def __a ( self ) -> Union[str, Any]: return self.embeddings.word_embeddings def __a ( self , lowerCAmelCase__ ) -> Any: a : Dict = value def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]: for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(lowerCAmelCase__ ) @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> List[Any]: if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: a : List[Any] = input_ids.size() elif inputs_embeds is not None: a : Any = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds" ) a : Union[str, Any] = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: a : Any = torch.ones(lowerCAmelCase__ , device=lowerCAmelCase__ ) if encoder_attention_mask is None: a : Optional[Any] = torch.ones(lowerCAmelCase__ , device=lowerCAmelCase__ ) if token_type_ids is None: a : str = torch.zeros(lowerCAmelCase__ , dtype=torch.long , device=lowerCAmelCase__ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
a : torch.Tensor = self.get_extended_attention_mask(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: a : Optional[int] = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: a : Union[str, Any] = encoder_attention_mask[:, None, None, :] a : Union[str, Any] = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility a : Tuple = (1.0 - encoder_extended_attention_mask) * -10_000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] a : Any = self.get_head_mask(lowerCAmelCase__ , self.config.num_hidden_layers ) a : Dict = self.embeddings( input_ids=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ ) a : Optional[int] = self.encoder( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , head_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , ) a : str = encoder_outputs[0] a : Any = self.pooler(lowerCAmelCase__ ) a : Tuple = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class __UpperCamelCase ( a__ ): def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple: a : List[Any] = message a : List[str] = exit_layer # start from 1! 
class __UpperCamelCase ( nn.Module ): def __init__( self , lowerCAmelCase__ ) -> Dict: super().__init__() a : List[str] = BertPooler(lowerCAmelCase__ ) a : Any = nn.Dropout(config.hidden_dropout_prob ) a : str = nn.Linear(config.hidden_size , config.num_labels ) def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]: # Pooler a : str = encoder_outputs[0] a : Optional[int] = self.pooler(lowerCAmelCase__ ) # "return" pooler_output # BertModel a : Optional[int] = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification a : int = bmodel_output[1] a : List[Any] = self.dropout(lowerCAmelCase__ ) a : str = self.classifier(lowerCAmelCase__ ) return logits, pooled_output @add_start_docstrings( """Bert Model (with early exiting - DeeBERT) with a classifier on top, also takes care of multi-layer training. """ , a__ , ) class __UpperCamelCase ( a__ ): def __init__( self , lowerCAmelCase__ ) -> Optional[int]: super().__init__(lowerCAmelCase__ ) a : Dict = config.num_labels a : Optional[int] = config.num_hidden_layers a : str = DeeBertModel(lowerCAmelCase__ ) a : str = nn.Dropout(config.hidden_dropout_prob ) a : str = nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=-1 , lowerCAmelCase__=False , ) -> Optional[int]: a : int = self.num_layers try: a : List[Any] = self.bert( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , head_mask=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits a : str = outputs[1] a : str = self.dropout(lowerCAmelCase__ ) a : Any = self.classifier(lowerCAmelCase__ ) a : Any = 
(logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: a : Optional[Any] = e.message a : Optional[int] = e.exit_layer a : Optional[int] = outputs[0] if not self.training: a : Tuple = entropy(lowerCAmelCase__ ) a : Optional[Any] = [] a : Optional[Any] = [] if labels is not None: if self.num_labels == 1: # We are doing regression a : int = MSELoss() a : str = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: a : List[Any] = CrossEntropyLoss() a : List[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits a : Any = [] for highway_exit in outputs[-1]: a : Any = highway_exit[0] if not self.training: highway_logits_all.append(lowerCAmelCase__ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression a : Tuple = MSELoss() a : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: a : Union[str, Any] = CrossEntropyLoss() a : Tuple = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(lowerCAmelCase__ ) if train_highway: a : Tuple = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: a : List[Any] = (loss,) + outputs if not self.training: a : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: a : List[str] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
31
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class __UpperCamelCase ( a__ ): lowerCamelCase : torch.FloatTensor lowerCamelCase : torch.FloatTensor lowerCamelCase : Optional[torch.FloatTensor] =None class __UpperCamelCase ( a__ , a__ ): lowerCamelCase : Tuple =2 @register_to_config def __init__( self , lowerCAmelCase__ = 0.02 , lowerCAmelCase__ = 100 , lowerCAmelCase__ = 1.007 , lowerCAmelCase__ = 80 , lowerCAmelCase__ = 0.05 , lowerCAmelCase__ = 50 , ) -> Union[str, Any]: # standard deviation of the initial noise distribution a : Tuple = sigma_max # setable values a : int = None a : np.IntTensor = None a : torch.FloatTensor = None # sigma(t_i) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> torch.FloatTensor: return sample def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[str]: a : List[Any] = num_inference_steps a : List[str] = np.arange(0 , self.num_inference_steps )[::-1].copy() a : int = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ ) a : List[str] = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] a : Any = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa , device=lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[torch.FloatTensor, float]: if self.config.s_min <= sigma <= self.config.s_max: a : str = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: a : Dict = 0 # sample eps ~ N(0, S_noise^2 * I) a : Union[str, Any] = self.config.s_noise * randn_tensor(sample.shape , generator=lowerCAmelCase__ ).to(sample.device ) a : Any = sigma + gamma * sigma a : Tuple = sample + ((sigma_hat**2 - 
sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = True , ) -> Union[KarrasVeOutput, Tuple]: a : Union[str, Any] = sample_hat + sigma_hat * model_output a : Tuple = (sample_hat - pred_original_sample) / sigma_hat a : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=lowerCAmelCase__ , derivative=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = True , ) -> Union[KarrasVeOutput, Tuple]: a : Optional[int] = sample_prev + sigma_prev * model_output a : str = (sample_prev - pred_original_sample) / sigma_prev a : Dict = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=lowerCAmelCase__ , derivative=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: raise NotImplementedError()
31
1
"""simple docstring""" from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __UpperCamelCase ( a__ ): def __a ( self ) -> Optional[Any]: a : Tuple = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , "embed_dim" ) ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , "num_heads" ) ) class __UpperCamelCase : def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=64 , lowerCAmelCase__=3 , lowerCAmelCase__=[16, 48, 96] , lowerCAmelCase__=[1, 3, 6] , lowerCAmelCase__=[1, 2, 10] , lowerCAmelCase__=[7, 3, 3] , lowerCAmelCase__=[4, 2, 2] , lowerCAmelCase__=[2, 1, 1] , lowerCAmelCase__=[2, 2, 2] , lowerCAmelCase__=[False, False, True] , lowerCAmelCase__=[0.0, 0.0, 0.0] , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=2 , ) -> List[Any]: a : List[str] = parent a : Tuple = batch_size a : str = image_size a : List[Any] = patch_sizes a : List[Any] = patch_stride a : int = patch_padding a : int = is_training a : str = use_labels a : List[str] = num_labels a : str = num_channels a : Optional[int] = embed_dim a : Optional[Any] = num_heads a : Tuple = stride_kv a : Union[str, Any] = depth a : Optional[Any] = cls_token a : List[Any] 
= attention_drop_rate a : List[str] = initializer_range a : str = layer_norm_eps def __a ( self ) -> Any: a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a : Any = None if self.use_labels: # create a random int32 tensor of given shape a : Dict = ids_tensor([self.batch_size] , self.num_labels ) a : int = self.get_config() return config, pixel_values, labels def __a ( self ) -> List[Any]: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: a : List[str] = TFCvtModel(config=lowerCAmelCase__ ) a : Optional[Any] = model(lowerCAmelCase__ , training=lowerCAmelCase__ ) a : List[str] = (self.image_size, self.image_size) a, a : List[str] = image_size[0], image_size[1] for i in range(len(self.depth ) ): a : Union[str, Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) a : List[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: a : Tuple = self.num_labels a : Any = TFCvtForImageClassification(lowerCAmelCase__ ) a : int = model(lowerCAmelCase__ , labels=lowerCAmelCase__ , training=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self ) -> Tuple: a : List[str] = self.prepare_config_and_inputs() a, a, a : Optional[int] 
= config_and_inputs a : int = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class __UpperCamelCase ( a__ , a__ , unittest.TestCase ): lowerCamelCase : List[str] =(TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () lowerCamelCase : List[Any] =( {"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification} if is_tf_available() else {} ) lowerCamelCase : Optional[Any] =False lowerCamelCase : List[str] =False lowerCamelCase : Optional[Any] =False lowerCamelCase : int =False lowerCamelCase : Dict =False def __a ( self ) -> Optional[int]: a : Dict = TFCvtModelTester(self ) a : List[Any] = TFCvtConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 ) def __a ( self ) -> Optional[Any]: self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="Cvt does not output attentions" ) def __a ( self ) -> Dict: pass @unittest.skip(reason="Cvt does not use inputs_embeds" ) def __a ( self ) -> Union[str, Any]: pass @unittest.skip(reason="Cvt does not support input and output embeddings" ) def __a ( self ) -> Union[str, Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) def __a ( self ) -> List[Any]: super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." 
, ) @slow def __a ( self ) -> Optional[int]: super().test_keras_fit() @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" ) def __a ( self ) -> int: a : Optional[int] = tf.keras.mixed_precision.Policy("mixed_float16" ) tf.keras.mixed_precision.set_global_policy(lowerCAmelCase__ ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("float32" ) def __a ( self ) -> Any: a, a : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a : List[str] = model_class(lowerCAmelCase__ ) a : Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a : Dict = [*signature.parameters.keys()] a : Union[str, Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def __a ( self ) -> int: def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): a : Union[str, Any] = model_class(lowerCAmelCase__ ) a : Union[str, Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) a : Dict = outputs.hidden_states a : Any = len(self.model_tester.depth ) self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) a, a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a : Dict = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a : Optional[int] = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Union[str, Any]: a : Tuple = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def __a ( self ) -> Any: a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ ) @slow def __a ( self ) -> str: for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Tuple = TFCvtModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( ) ->int: '''simple docstring''' a : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def __a ( self ) -> Optional[Any]: return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __a ( self ) -> Optional[Any]: a : Union[str, Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) a : Any = self.default_image_processor a : Any = prepare_img() a : str = image_processor(images=lowerCAmelCase__ , return_tensors="tf" ) # forward pass a : List[str] = model(**lowerCAmelCase__ ) # verify the logits a : Dict = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) a : str = tf.constant([0.9_285, 0.9_015, -0.3_150] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCAmelCase__ , atol=1E-4 ) )
31
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal a : Optional[Any] = datasets.utils.logging.get_logger(__name__) a : Union[str, Any] = ['''names''', '''prefix'''] a : Any = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] a : Any = ['''encoding_errors''', '''on_bad_lines'''] a : List[str] = ['''date_format'''] @dataclass class __UpperCamelCase ( datasets.BuilderConfig ): lowerCamelCase : str ="," lowerCamelCase : Optional[str] =None lowerCamelCase : Optional[Union[int, List[int], str]] ="infer" lowerCamelCase : Optional[List[str]] =None lowerCamelCase : Optional[List[str]] =None lowerCamelCase : Optional[Union[int, str, List[int], List[str]]] =None lowerCamelCase : Optional[Union[List[int], List[str]]] =None lowerCamelCase : Optional[str] =None lowerCamelCase : bool =True lowerCamelCase : Optional[Literal["c", "python", "pyarrow"]] =None lowerCamelCase : Dict[Union[int, str], Callable[[Any], Any]] =None lowerCamelCase : Optional[list] =None lowerCamelCase : Optional[list] =None lowerCamelCase : bool =False lowerCamelCase : Optional[Union[int, List[int]]] =None lowerCamelCase : Optional[int] =None lowerCamelCase : Optional[Union[str, List[str]]] =None lowerCamelCase : bool =True lowerCamelCase : bool =True lowerCamelCase : bool =False lowerCamelCase : bool =True lowerCamelCase : Optional[str] =None lowerCamelCase : str ="." 
lowerCamelCase : Optional[str] =None lowerCamelCase : str ='"' lowerCamelCase : int =0 lowerCamelCase : Optional[str] =None lowerCamelCase : Optional[str] =None lowerCamelCase : Optional[str] =None lowerCamelCase : Optional[str] =None lowerCamelCase : bool =True lowerCamelCase : bool =True lowerCamelCase : int =0 lowerCamelCase : bool =True lowerCamelCase : bool =False lowerCamelCase : Optional[str] =None lowerCamelCase : int =1_0000 lowerCamelCase : Optional[datasets.Features] =None lowerCamelCase : Optional[str] ="strict" lowerCamelCase : Literal["error", "warn", "skip"] ="error" lowerCamelCase : Optional[str] =None def __a ( self ) -> Dict: if self.delimiter is not None: a : int = self.delimiter if self.column_names is not None: a : Any = self.column_names @property def __a ( self ) -> List[str]: a : Dict = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # 
some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class __UpperCamelCase ( datasets.ArrowBasedBuilder ): lowerCamelCase : Union[str, Any] =CsvConfig def __a ( self ) -> Optional[Any]: return datasets.DatasetInfo(features=self.config.features ) def __a ( self , lowerCAmelCase__ ) -> Optional[int]: if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) a : Optional[Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(lowerCAmelCase__ , (str, list, tuple) ): a : Tuple = data_files if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): a : Tuple = [files] a : int = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] a : int = [] for split_name, files in data_files.items(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): a : Any = [files] a : List[str] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) ) return splits def __a ( self , lowerCAmelCase__ ) -> 
pa.Table: if self.config.features is not None: a : Optional[Any] = self.config.features.arrow_schema if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ): # cheaper cast a : Dict = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example a : Union[str, Any] = table_cast(lowerCAmelCase__ , lowerCAmelCase__ ) return pa_table def __a ( self , lowerCAmelCase__ ) -> Any: a : Tuple = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str a : Any = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ): a : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(lowerCAmelCase__ ): a : Any = pa.Table.from_pandas(lowerCAmelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ ) except ValueError as e: logger.error(f"""Failed to read file '{file}' with error {type(lowerCAmelCase__ )}: {e}""" ) raise
31
1
"""simple docstring""" import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py a : Optional[int] = '''.''' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) a : Optional[Any] = [ '''Assert''', '''AssignVariableOp''', '''EmptyTensorList''', '''MergeV2Checkpoints''', '''ReadVariableOp''', '''ResourceGather''', '''RestoreV2''', '''SaveV2''', '''ShardedFilename''', '''StatefulPartitionedCall''', '''StaticRegexFullMatch''', '''VarHandleOp''', ] def _SCREAMING_SNAKE_CASE ( _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] ) ->Union[str, Any]: '''simple docstring''' a : Dict = SavedModel() a : Dict = [] with open(os.path.join(_lowercase , "utils" , "tf_ops" , "onnx.json" ) ) as f: a : int = json.load(_lowercase )["opsets"] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(_lowercase )] ) with open(_lowercase , "rb" ) as f: saved_model.ParseFromString(f.read() ) a : Any = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want a : Tuple = sorted(_lowercase ) a : str = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(_lowercase ) if strict and len(_lowercase ) > 0: raise Exception(F"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops ) elif len(_lowercase ) > 0: print(F"""Found the following incompatible ops 
for the opset {opset}:""" ) print(*_lowercase , sep="\n" ) else: print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""" ) if __name__ == "__main__": a : Any = argparse.ArgumentParser() parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''') parser.add_argument( '''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.''' ) parser.add_argument( '''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.''' ) parser.add_argument( '''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)''' ) a : Union[str, Any] = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
31
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class __UpperCamelCase ( a__ , a__ , unittest.TestCase ): lowerCamelCase : Dict =IFPipeline lowerCamelCase : int =TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""} lowerCamelCase : int =TEXT_TO_IMAGE_BATCH_PARAMS lowerCamelCase : int =PipelineTesterMixin.required_optional_params - {"""latents"""} def __a ( self ) -> List[str]: return self._get_dummy_components() def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Dict: if str(lowerCAmelCase__ ).startswith("mps" ): a : Tuple = torch.manual_seed(lowerCAmelCase__ ) else: a : int = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) a : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def __a ( self ) -> Union[str, Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def __a ( self ) -> Any: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __a ( self ) -> Union[str, Any]: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __a ( self ) -> Optional[int]: 
self._test_save_load_local() def __a ( self ) -> Tuple: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __a ( self ) -> str: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def __a ( self ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self ) -> Tuple: # if a : Tuple = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa ) a : str = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("cuda" ) a, a : List[str] = pipe_a.encode_prompt("anime turtle" , device="cuda" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() a : Optional[int] = None a : Optional[int] = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img a : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components ) a : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting a 
: Union[str, Any] = IFInpaintingPipeline(**pipe_a.components ) a : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: # pipeline 1 _start_torch_memory_measurement() a : List[str] = torch.Generator(device="cpu" ).manual_seed(0 ) a : Dict = pipe_a( prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (64, 64, 3) a : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 a : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) # pipeline 2 _start_torch_memory_measurement() a : List[str] = torch.Generator(device="cpu" ).manual_seed(0 ) a : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : Union[str, Any] = pipe_a( prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (256, 256, 3) a : int = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 a : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , 
lowerCAmelCase__ ) -> int: # pipeline 1 _start_torch_memory_measurement() a : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : Tuple = torch.Generator(device="cpu" ).manual_seed(0 ) a : List[Any] = pipe_a( prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , ) a : Tuple = output.images[0] assert image.shape == (64, 64, 3) a : int = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 a : Optional[int] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) # pipeline 2 _start_torch_memory_measurement() a : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) a : List[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : Dict = pipe_a( prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , original_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , ) a : int = output.images[0] assert image.shape == (256, 256, 3) a : str = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 a : Any = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: # pipeline 1 _start_torch_memory_measurement() a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowerCAmelCase__ ) a : 
Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) a : List[str] = pipe_a( prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , ) a : List[Any] = output.images[0] assert image.shape == (64, 64, 3) a : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 a : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) # pipeline 2 _start_torch_memory_measurement() a : str = torch.Generator(device="cpu" ).manual_seed(0 ) a : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : Dict = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(lowerCAmelCase__ ) a : Optional[int] = pipe_a( prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , original_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (256, 256, 3) a : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 a : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( ) ->List[str]: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
31
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=18 , lowerCAmelCase__=30 , lowerCAmelCase__=400 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , ) -> Any: a : Union[str, Any] = size if size is not None else {"height": 18, "width": 18} a : Optional[int] = parent a : Any = batch_size a : str = num_channels a : List[Any] = image_size a : Dict = min_resolution a : int = max_resolution a : Optional[int] = do_resize a : List[str] = size a : Union[str, Any] = apply_ocr def __a ( self ) -> Any: return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Union[str, Any] =LayoutLMvaImageProcessor if is_pytesseract_available() else None def __a ( self ) -> Union[str, Any]: a : Tuple = LayoutLMvaImageProcessingTester(self ) @property def __a ( self ) -> str: return self.image_processor_tester.prepare_image_processor_dict() def __a ( self ) -> int: a : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "apply_ocr" ) ) def __a ( self ) -> Union[str, Any]: a : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) a : Dict = 
self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def __a ( self ) -> Any: pass def __a ( self ) -> str: # Initialize image_processing a : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input a : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) self.assertIsInstance(encoding.words , lowerCAmelCase__ ) self.assertIsInstance(encoding.boxes , lowerCAmelCase__ ) # Test batched a : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def __a ( self ) -> Union[str, Any]: # Initialize image_processing a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) # Test not batched input a : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched a : Dict = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def __a ( self ) -> Optional[Any]: # Initialize image_processing a : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test not batched input a : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched a : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def __a ( self ) -> Union[str, Any]: # with apply_OCR = True a : Optional[Any] = LayoutLMvaImageProcessor() from datasets import load_dataset a : Tuple = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" ) a : int = Image.open(ds[0]["file"] ).convert("RGB" ) a : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 a : Tuple = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", 
"will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231 a : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], 
[469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 
559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , lowerCAmelCase__ ) self.assertListEqual(encoding.boxes , lowerCAmelCase__ ) # with apply_OCR = False a : List[Any] = LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ ) a : List[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
31
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __UpperCamelCase ( unittest.TestCase ): def __a ( self ) -> Optional[Any]: a : Optional[int] = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) ) def __a ( self ) -> Optional[Any]: a : str = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) ) def __a ( self ) -> Dict: a : List[str] = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) ) def __a ( self ) -> List[Any]: a : Optional[Any] = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) ) def __a ( self ) -> Tuple: a : Tuple = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) ) def __a ( self ) -> Dict: a : Dict = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", 
"text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] a : Dict = "fp16" self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def __a ( self ) -> List[str]: a : List[Any] = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] a : Any = "fp16" self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def __a ( self ) -> int: # pass variant but use the non-variant filenames a : int = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] a : Tuple = "fp16" self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def __a ( self ) -> str: a : str = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] a : Any = "fp16" self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def __a ( self ) -> str: a : Union[str, Any] = [ "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", ] a : str = "fp16" self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def __a ( self ) -> List[str]: # pass variant but use the non-variant filenames a : Optional[int] = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] a : str = "fp16" self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def __a ( self ) -> Optional[Any]: a : Any = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", 
"vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", # 'text_encoder/model.fp16.safetensors', "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] a : Optional[int] = "fp16" self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
31
1
"""simple docstring""" import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a : List[Any] = logging.get_logger(__name__) a : Optional[Any] = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', } a : Any = { '''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''}, '''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''}, } a : Union[str, Any] = { '''ctrl''': 256, } a : List[Any] = { '''Pregnancy''': 168629, '''Christianity''': 7675, '''Explain''': 106423, '''Fitness''': 63440, '''Saving''': 63163, '''Ask''': 27171, '''Ass''': 95985, '''Joke''': 163509, '''Questions''': 45622, '''Thoughts''': 49605, '''Retail''': 52342, '''Feminism''': 164338, '''Writing''': 11992, '''Atheism''': 192263, '''Netflix''': 48616, '''Computing''': 39639, '''Opinion''': 43213, '''Alone''': 44967, '''Funny''': 58917, '''Gaming''': 40358, '''Human''': 4088, '''India''': 1331, '''Joker''': 77138, '''Diet''': 36206, '''Legal''': 11859, '''Norman''': 4939, '''Tip''': 72689, '''Weight''': 52343, '''Movies''': 46273, '''Running''': 23425, '''Science''': 2090, '''Horror''': 37793, '''Confession''': 60572, '''Finance''': 12250, '''Politics''': 16360, '''Scary''': 191985, '''Support''': 12654, '''Technologies''': 32516, '''Teenage''': 66160, '''Event''': 32769, '''Learned''': 67460, '''Notion''': 182770, '''Wikipedia''': 37583, '''Books''': 6665, '''Extract''': 76050, '''Confessions''': 102701, '''Conspiracy''': 75932, '''Links''': 63674, '''Narcissus''': 150425, '''Relationship''': 54766, '''Relationships''': 134796, '''Reviews''': 41671, '''News''': 4256, '''Translation''': 26820, '''multilingual''': 128406, } def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->Any: '''simple docstring''' a : Any = set() a : Optional[int] = word[0] for char in word[1:]: 
pairs.add((prev_char, char) ) a : Optional[Any] = char a : List[Any] = set(_lowercase ) return pairs class __UpperCamelCase ( a__ ): lowerCamelCase : Union[str, Any] =VOCAB_FILES_NAMES lowerCamelCase : Any =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : Union[str, Any] =CONTROL_CODES def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="<unk>" , **lowerCAmelCase__ ) -> List[str]: super().__init__(unk_token=lowerCAmelCase__ , **lowerCAmelCase__ ) with open(lowerCAmelCase__ , encoding="utf-8" ) as vocab_handle: a : int = json.load(lowerCAmelCase__ ) a : Dict = {v: k for k, v in self.encoder.items()} with open(lowerCAmelCase__ , encoding="utf-8" ) as merges_handle: a : Any = merges_handle.read().split("\n" )[1:-1] a : Optional[Any] = [tuple(merge.split() ) for merge in merges] a : List[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) a : Optional[Any] = {} @property def __a ( self ) -> List[str]: return len(self.encoder ) def __a ( self ) -> List[str]: return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self , lowerCAmelCase__ ) -> Optional[int]: if token in self.cache: return self.cache[token] a : int = tuple(lowerCAmelCase__ ) a : Optional[int] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) a : Dict = get_pairs(lowerCAmelCase__ ) if not pairs: return token while True: a : Union[str, Any] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break a, a : Dict = bigram a : Union[str, Any] = [] a : int = 0 while i < len(lowerCAmelCase__ ): try: a : List[str] = word.index(lowerCAmelCase__ , lowerCAmelCase__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) a : List[str] = j if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: 
new_word.append(word[i] ) i += 1 a : int = tuple(lowerCAmelCase__ ) a : Union[str, Any] = new_word if len(lowerCAmelCase__ ) == 1: break else: a : Optional[Any] = get_pairs(lowerCAmelCase__ ) a : str = "@@ ".join(lowerCAmelCase__ ) a : List[Any] = word[:-4] a : int = word return word def __a ( self , lowerCAmelCase__ ) -> Any: a : str = [] a : List[Any] = re.findall(R"\S+\n?" , lowerCAmelCase__ ) for token in words: split_tokens.extend(list(self.bpe(lowerCAmelCase__ ).split(" " ) ) ) return split_tokens def __a ( self , lowerCAmelCase__ ) -> Optional[Any]: return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) ) def __a ( self , lowerCAmelCase__ ) -> str: return self.decoder.get(lowerCAmelCase__ , self.unk_token ) def __a ( self , lowerCAmelCase__ ) -> int: a : List[str] = " ".join(lowerCAmelCase__ ).replace("@@ " , "" ).strip() return out_string def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return a : Optional[int] = os.path.join( lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) a : List[str] = os.path.join( lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + "\n" ) a : Tuple = 0 with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" 
) a : Dict = token_index writer.write(" ".join(lowerCAmelCase__ ) + "\n" ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
31
"""simple docstring""" import flax.linen as nn import jax import jax.numpy as jnp class __UpperCamelCase ( nn.Module ): lowerCamelCase : int lowerCamelCase : jnp.dtype =jnp.floataa def __a ( self ) -> Tuple: a : str = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , lowerCAmelCase__ ) -> Optional[Any]: a, a, a, a : List[str] = hidden_states.shape a : List[Any] = jax.image.resize( lowerCAmelCase__ , shape=(batch, height * 2, width * 2, channels) , method="nearest" , ) a : List[str] = self.conv(lowerCAmelCase__ ) return hidden_states class __UpperCamelCase ( nn.Module ): lowerCamelCase : int lowerCamelCase : jnp.dtype =jnp.floataa def __a ( self ) -> Dict: a : Optional[Any] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , lowerCAmelCase__ ) -> Tuple: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) a : Tuple = self.conv(lowerCAmelCase__ ) return hidden_states class __UpperCamelCase ( nn.Module ): lowerCamelCase : int lowerCamelCase : int =None lowerCamelCase : float =0.0 lowerCamelCase : bool =None lowerCamelCase : jnp.dtype =jnp.floataa def __a ( self ) -> int: a : Dict = self.in_channels if self.out_channels is None else self.out_channels a : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) a : List[Any] = nn.Conv( lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) a : List[Any] = nn.Dense(lowerCAmelCase__ , dtype=self.dtype ) a : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) a : Optional[int] = nn.Dropout(self.dropout_prob ) a : Dict = nn.Conv( lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) a : Union[str, Any] = self.in_channels != out_channels if self.use_nin_shortcut is None 
else self.use_nin_shortcut a : List[str] = None if use_nin_shortcut: a : Optional[Any] = nn.Conv( lowerCAmelCase__ , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , ) def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=True ) -> str: a : int = hidden_states a : Tuple = self.norma(lowerCAmelCase__ ) a : Any = nn.swish(lowerCAmelCase__ ) a : int = self.conva(lowerCAmelCase__ ) a : int = self.time_emb_proj(nn.swish(lowerCAmelCase__ ) ) a : Tuple = jnp.expand_dims(jnp.expand_dims(lowerCAmelCase__ , 1 ) , 1 ) a : Dict = hidden_states + temb a : str = self.norma(lowerCAmelCase__ ) a : List[Any] = nn.swish(lowerCAmelCase__ ) a : List[str] = self.dropout(lowerCAmelCase__ , lowerCAmelCase__ ) a : List[str] = self.conva(lowerCAmelCase__ ) if self.conv_shortcut is not None: a : Tuple = self.conv_shortcut(lowerCAmelCase__ ) return hidden_states + residual
31
1
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] ) ->int: '''simple docstring''' a : int = {} a : Union[str, Any] = tokenizer(example["content"] , truncation=_lowercase )["input_ids"] a : Any = len(example["content"] ) / len(output["input_ids"] ) return output a : int = HfArgumentParser(PretokenizationArguments) a : Optional[int] = parser.parse_args() if args.num_workers is None: a : Tuple = multiprocessing.cpu_count() a : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir) a : Dict = time.time() a : Tuple = load_dataset(args.dataset_name, split='''train''') print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') a : Dict = time.time() a : Tuple = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ '''repo_name''', '''path''', '''copies''', '''size''', '''content''', '''license''', '''hash''', '''line_mean''', '''line_max''', '''alpha_frac''', '''autogenerated''', ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') a : Tuple = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
31
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def _SCREAMING_SNAKE_CASE ( _lowercase : str=None ) ->Optional[Any]: '''simple docstring''' if subparsers is not None: a : Dict = subparsers.add_parser("test" ) else: a : Tuple = argparse.ArgumentParser("Accelerate test command" ) parser.add_argument( "--config_file" , default=_lowercase , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , ) if subparsers is not None: parser.set_defaults(func=_lowercase ) return parser def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->str: '''simple docstring''' a : List[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] ) if args.config_file is None: a : int = script_name else: a : int = F"""--config_file={args.config_file} {script_name}""" a : Optional[int] = ["accelerate-launch"] + test_args.split() a : Optional[int] = execute_subprocess_async(_lowercase , env=os.environ.copy() ) if result.returncode == 0: print("Test is a success! 
You are ready for your distributed training!" ) def _SCREAMING_SNAKE_CASE ( ) ->Tuple: '''simple docstring''' a : Any = test_command_parser() a : Union[str, Any] = parser.parse_args() test_command(_lowercase ) if __name__ == "__main__": main()
31
1
"""simple docstring""" import os import unittest from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer from ...test_tokenization_common import TokenizerTesterMixin class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Union[str, Any] =PhobertTokenizer lowerCamelCase : Dict =False def __a ( self ) -> Any: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt a : Optional[int] = ["T@@", "i", "I", "R@@", "r", "e@@"] a : str = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) a : Any = ["#version: 0.2", "l à</w>"] a : Dict = {"unk_token": "<unk>"} a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: for token in vocab_tokens: fp.write(f"""{token} {vocab_tokens[token]}\n""" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCAmelCase__ ) ) def __a ( self , **lowerCAmelCase__ ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return PhobertTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ ) -> List[Any]: a : Union[str, Any] = "Tôi là VinAI Research" a : int = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>" return input_text, output_text def __a ( self ) -> Optional[Any]: a : Tuple = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) a : str = "Tôi là VinAI Research" a : str = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split() a : str = tokenizer.tokenize(lowerCAmelCase__ ) print(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : Union[str, Any] = tokens + [tokenizer.unk_token] a : int = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3] 
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
31
"""simple docstring""" a : str = 8.314_4598 def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float ) ->float: '''simple docstring''' if temperature < 0: raise Exception("Temperature cannot be less than 0 K" ) if molar_mass <= 0: raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" ) else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example a : Any = 300 a : Dict = 28 a : Dict = rms_speed_of_molecule(temperature, molar_mass) print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
31
1
"""simple docstring""" import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : str =CLIPTokenizer lowerCamelCase : Tuple =CLIPTokenizerFast lowerCamelCase : List[str] =True lowerCamelCase : int ={} lowerCamelCase : int =False def __a ( self ) -> Any: super().setUp() # fmt: off a : Dict = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on a : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) a : Optional[int] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"] a : List[str] = {"unk_token": "<unk>"} a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCAmelCase__ ) ) def __a ( self , **lowerCAmelCase__ ) -> List[Any]: kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def __a ( self , **lowerCAmelCase__ ) -> int: kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ ) -> Optional[int]: a : List[Any] = "lower newer" a : int = "lower newer" return input_text, output_text def __a ( self ) -> str: a : Optional[Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) a : List[Any] = "lower 
newer" a : Optional[int] = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"] a : List[str] = tokenizer.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) a : List[Any] = tokens + [tokenizer.unk_token] a : List[Any] = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ ) @require_ftfy def __a ( self ) -> Dict: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a : List[str] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) a : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) a : Any = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d." a : str = tokenizer_s.tokenize(lowerCAmelCase__ ) a : Union[str, Any] = tokenizer_r.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways a : List[Any] = "xa\u0303y" + " " + "x\xe3y" a : str = tokenizer_s.tokenize(lowerCAmelCase__ ) a : Optional[int] = tokenizer_r.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Test that the tokenization is identical on unicode of space type a : Any = [ "\u0009", # (horizontal tab, '\t') "\u000B", # (vertical tab) "\u000C", # (form feed) "\u0020", # (space, ' ') "\u200E", # (left-to-right mark):w "\u200F", # (right-to-left mark) ] for unicode_seq in spaces_unicodes: a : Any = tokenizer_s.tokenize(lowerCAmelCase__ ) a : Dict = tokenizer_r.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Test that the tokenization is identical on unicode of line break type a : Any = [ "\u000A", # (line feed, '\n') "\r\n", # (carriage return and line feed, '\r\n') "\u000D", # (carriage return, '\r') "\r", # 
(carriage return, '\r') "\u000D", # (carriage return, '\r') "\u2028", # (line separator) "\u2029", # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: a : List[str] = tokenizer_s.tokenize(lowerCAmelCase__ ) a : Tuple = tokenizer_r.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> List[str]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a : Tuple = "hello" # `hello` is a token in the vocabulary of `pretrained_name` a : Tuple = f"""{text_of_1_token} {text_of_1_token}""" a : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , ) a : str = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) a : Optional[Any] = f""" {text}""" a : Optional[int] = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , ) a : Any = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) def __a ( self ) -> Union[str, Any]: # Test 
related to the breaking change introduced in transformers v4.17.0 # We need to check that an error in raised when the user try to load a previous version of the tokenizer. with self.assertRaises(lowerCAmelCase__ ) as context: self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" ) self.assertTrue( context.exception.args[0].startswith( "The `backend_tokenizer` provided does not match the expected format." ) ) @require_ftfy def __a ( self ) -> Union[str, Any]: super().test_tokenization_python_rust_equals() def __a ( self ) -> List[str]: # CLIP always lower cases letters pass
31
"""simple docstring""" import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class __UpperCamelCase ( unittest.TestCase ): def __a ( self , lowerCAmelCase__ ) -> Optional[int]: a : str = 3 a : str = 250 a : List[Any] = ids_tensor((batch_size, length) , lowerCAmelCase__ ) a : Optional[Any] = torch.ones((batch_size, length) , device=lowerCAmelCase__ , dtype=torch.float ) / length return input_ids, scores def __a ( self ) -> List[Any]: a, a : str = self._get_tensors(5 ) a : Any = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : str = self._get_tensors(9 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : Union[str, Any] = self._get_tensors(10 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __a ( self ) -> List[Any]: a : Optional[Any] = MaxLengthCriteria(max_length=10 ) a, a : int = self._get_tensors(5 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : int = self._get_tensors(9 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : Union[str, Any] = self._get_tensors(10 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __a ( self ) -> List[str]: a : Tuple = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) a, a : str = self._get_tensors(5 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : int = self._get_tensors(9 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : int = self._get_tensors(10 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a : 
List[Any] = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def __a ( self ) -> str: a, a : Tuple = self._get_tensors(5 ) a : str = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a : Optional[int] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __a ( self ) -> str: validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(lowerCAmelCase__ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) a : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(lowerCAmelCase__ ) , 1 )
31
1
"""simple docstring""" import numpy as np def _SCREAMING_SNAKE_CASE ( _lowercase : np.array ) ->np.array: '''simple docstring''' return 1 / (1 + np.exp(-vector )) if __name__ == "__main__": import doctest doctest.testmod()
31
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : int = 200 ) ->int: '''simple docstring''' a : Dict = [1, 2, 5, 10, 20, 50, 100, 200] a : Optional[Any] = [0] * (pence + 1) a : List[Any] = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(_lowercase , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(200) == 73682
31
1
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation a : Tuple = logging.get_logger(__name__) a : Optional[Any] = {'''tokenizer_file''': '''tokenizer.json'''} a : str = { '''tokenizer_file''': { '''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''', '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''', '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''', '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''', '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''', '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''', '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''', }, } class __UpperCamelCase ( a__ ): lowerCamelCase : Tuple =VOCAB_FILES_NAMES lowerCamelCase : str =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : List[Any] =["""input_ids""", """attention_mask"""] lowerCamelCase : Dict =None def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__=False , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Optional[int]: super().__init__( lowerCAmelCase__ , lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , 
clean_up_tokenization_spaces=lowerCAmelCase__ , **lowerCAmelCase__ , ) a : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCAmelCase__ ) != add_prefix_space: a : Union[str, Any] = getattr(lowerCAmelCase__ , pre_tok_state.pop("type" ) ) a : Optional[int] = add_prefix_space a : Any = pre_tok_class(**lowerCAmelCase__ ) a : Optional[int] = add_prefix_space def __a ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> BatchEncoding: a : Dict = kwargs.get("is_split_into_words" , lowerCAmelCase__ ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with""" " pretokenized inputs." ) return super()._batch_encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__ ) def __a ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> BatchEncoding: a : List[Any] = kwargs.get("is_split_into_words" , lowerCAmelCase__ ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with""" " pretokenized inputs." ) return super()._encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]: a : Union[str, Any] = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ ) return tuple(lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ ) -> List[int]: a : Dict = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) + [self.eos_token_id] ) if len(lowerCAmelCase__ ) > self.model_max_length: a : int = input_ids[-self.model_max_length :] return input_ids
31
"""simple docstring""" from ..utils import DummyObject, requires_backends class __UpperCamelCase ( metaclass=a__ ): lowerCamelCase : Optional[Any] =["""transformers""", """torch""", """note_seq"""] def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]: requires_backends(self , ["transformers", "torch", "note_seq"] ) @classmethod def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any: requires_backends(cls , ["transformers", "torch", "note_seq"] ) @classmethod def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int: requires_backends(cls , ["transformers", "torch", "note_seq"] )
31
1
"""simple docstring""" from functools import reduce a : Tuple = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def _SCREAMING_SNAKE_CASE ( _lowercase : str = N ) ->int: '''simple docstring''' return max( # mypy cannot properly interpret reduce int(reduce(lambda _lowercase , _lowercase : str(int(_lowercase ) * int(_lowercase ) ) , n[i : i + 13] ) ) for i in range(len(_lowercase ) - 12 ) ) if __name__ == "__main__": print(F'''{solution() = }''')
31
"""simple docstring""" import qiskit def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int ) ->qiskit.result.counts.Counts: '''simple docstring''' a : Union[str, Any] = qiskit.Aer.get_backend("aer_simulator" ) # Create a Quantum Circuit acting on the q register a : Optional[Any] = qiskit.QuantumCircuit(_lowercase , _lowercase ) # Map the quantum measurement to the classical bits circuit.measure([0] , [0] ) # Execute the circuit on the simulator a : Optional[int] = qiskit.execute(_lowercase , _lowercase , shots=1000 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(_lowercase ) if __name__ == "__main__": print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
31
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) a : Dict = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Tuple = ['''ViTFeatureExtractor'''] a : str = ['''ViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Tuple = [ '''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ViTForImageClassification''', '''ViTForMaskedImageModeling''', '''ViTModel''', '''ViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = [ '''TFViTForImageClassification''', '''TFViTModel''', '''TFViTPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Tuple = [ '''FlaxViTForImageClassification''', '''FlaxViTModel''', '''FlaxViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from 
.modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys a : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
31
"""simple docstring""" from random import randint from tempfile import TemporaryFile import numpy as np def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] , _lowercase : Optional[Any] , _lowercase : Union[str, Any] ) ->Dict: '''simple docstring''' a : List[str] = 0 if start < end: a : Tuple = randint(_lowercase , _lowercase ) a : List[str] = a[end] a : str = a[pivot] a : Optional[int] = temp a, a : Dict = _in_place_partition(_lowercase , _lowercase , _lowercase ) count += _in_place_quick_sort(_lowercase , _lowercase , p - 1 ) count += _in_place_quick_sort(_lowercase , p + 1 , _lowercase ) return count def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : str , _lowercase : List[Any] ) ->str: '''simple docstring''' a : Union[str, Any] = 0 a : List[Any] = randint(_lowercase , _lowercase ) a : int = a[end] a : List[str] = a[pivot] a : Tuple = temp a : Union[str, Any] = start - 1 for index in range(_lowercase , _lowercase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value a : List[str] = new_pivot_index + 1 a : Optional[int] = a[new_pivot_index] a : Union[str, Any] = a[index] a : List[Any] = temp a : Tuple = a[new_pivot_index + 1] a : str = a[end] a : Dict = temp return new_pivot_index + 1, count a : int = TemporaryFile() a : Tuple = 100 # 1000 elements are to be sorted a , a : int = 0, 1 # mean and standard deviation a : List[Any] = np.random.normal(mu, sigma, p) np.save(outfile, X) print('''The array is''') print(X) outfile.seek(0) # using the same array a : int = np.load(outfile) a : Tuple = len(M) - 1 a : Union[str, Any] = _in_place_quick_sort(M, 0, r) print( '''No of Comparisons for 100 elements selected from a standard normal distribution''' '''is :''' ) print(z)
31
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) a : Optional[Any] = { '''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''], '''processing_layoutlmv2''': ['''LayoutLMv2Processor'''], '''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[Any] = ['''LayoutLMv2TokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Dict = ['''LayoutLMv2FeatureExtractor'''] a : str = ['''LayoutLMv2ImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[Any] = [ '''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LayoutLMv2ForQuestionAnswering''', '''LayoutLMv2ForSequenceClassification''', '''LayoutLMv2ForTokenClassification''', '''LayoutLMv2Layer''', '''LayoutLMv2Model''', '''LayoutLMv2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from 
.modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys a : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
31
"""simple docstring""" import baseaa def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->bytes: '''simple docstring''' return baseaa.aaaencode(string.encode("utf-8" ) ) def _SCREAMING_SNAKE_CASE ( _lowercase : bytes ) ->str: '''simple docstring''' return baseaa.aaadecode(_lowercase ).decode("utf-8" ) if __name__ == "__main__": import doctest doctest.testmod()
31
1
"""simple docstring""" a : str = 8.314_4598 def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float ) ->float: '''simple docstring''' if temperature < 0: raise Exception("Temperature cannot be less than 0 K" ) if molar_mass <= 0: raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" ) else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example a : Any = 300 a : Dict = 28 a : Dict = rms_speed_of_molecule(temperature, molar_mass) print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
31
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: a : Tuple = None a : Any = logging.get_logger(__name__) a : List[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} a : str = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''', '''albert-xlarge-v2''': 
'''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''', }, } a : str = { '''albert-base-v1''': 512, '''albert-large-v1''': 512, '''albert-xlarge-v1''': 512, '''albert-xxlarge-v1''': 512, '''albert-base-v2''': 512, '''albert-large-v2''': 512, '''albert-xlarge-v2''': 512, '''albert-xxlarge-v2''': 512, } a : Union[str, Any] = '''▁''' class __UpperCamelCase ( a__ ): lowerCamelCase : Union[str, Any] =VOCAB_FILES_NAMES lowerCamelCase : Dict =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : List[Any] =AlbertTokenizer def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , **lowerCAmelCase__ , ) -> Union[str, Any]: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
a : Optional[int] = ( AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ , normalized=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token ) super().__init__( lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , ) a : Dict = do_lower_case a : Any = remove_space a : Optional[Any] = keep_accents a : List[str] = vocab_file a : Optional[Any] = False if not self.vocab_file else True def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]: a : Optional[Any] = [self.sep_token_id] a : int = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]: a : Optional[Any] = [self.sep_token_id] a : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(lowerCAmelCase__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return a : Dict = os.path.join( lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ): copyfile(self.vocab_file , lowerCAmelCase__ ) return (out_vocab_file,)
31
1
"""simple docstring""" import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Any =DownBlockaD # noqa F405 lowerCamelCase : List[str] ="""down""" def __a ( self ) -> List[str]: a : Optional[Any] = [-0.0_232, -0.9_869, 0.8_054, -0.0_637, -0.1_688, -1.4_264, 0.4_470, -1.3_394, 0.0_904] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Tuple =ResnetDownsampleBlockaD # noqa F405 lowerCamelCase : Tuple ="""down""" def __a ( self ) -> Dict: a : Any = [0.0_710, 0.2_410, -0.7_320, -1.0_757, -1.1_343, 0.3_540, -0.0_133, -0.2_576, 0.0_948] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Optional[Any] =AttnDownBlockaD # noqa F405 lowerCamelCase : Optional[int] ="""down""" def __a ( self ) -> Union[str, Any]: a : int = [0.0_636, 0.8_964, -0.6_234, -1.0_131, 0.0_844, 0.4_935, 0.3_437, 0.0_911, -0.2_957] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : List[str] =CrossAttnDownBlockaD # noqa F405 lowerCamelCase : List[Any] ="""down""" def __a ( self ) -> Dict: a, a : Union[str, Any] = super().prepare_init_args_and_inputs_for_common() a : int = 32 return init_dict, inputs_dict def __a ( self ) -> List[Any]: a : str = [0.2_238, -0.7_396, -0.2_255, -0.3_829, 0.1_925, 1.1_665, 0.0_603, -0.7_295, 0.1_983] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : List[str] =SimpleCrossAttnDownBlockaD # noqa F405 lowerCamelCase : Union[str, Any] ="""down""" @property def __a ( self ) -> Dict: return super().get_dummy_input(include_encoder_hidden_states=lowerCAmelCase__ ) def __a ( self ) -> Optional[Any]: a, a : List[str] = super().prepare_init_args_and_inputs_for_common() a : 
Optional[Any] = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" ) def __a ( self ) -> List[Any]: a : Optional[int] = [0.7_921, -0.0_992, -0.1_962, -0.7_695, -0.4_242, 0.7_804, 0.4_737, 0.2_765, 0.3_338] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Dict =SkipDownBlockaD # noqa F405 lowerCamelCase : int ="""down""" @property def __a ( self ) -> str: return super().get_dummy_input(include_skip_sample=lowerCAmelCase__ ) def __a ( self ) -> Dict: a : int = [-0.0_845, -0.2_087, -0.2_465, 0.0_971, 0.1_900, -0.0_484, 0.2_664, 0.4_179, 0.5_069] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : List[str] =AttnSkipDownBlockaD # noqa F405 lowerCamelCase : Optional[Any] ="""down""" @property def __a ( self ) -> Optional[Any]: return super().get_dummy_input(include_skip_sample=lowerCAmelCase__ ) def __a ( self ) -> Union[str, Any]: a : int = [0.5_539, 0.1_609, 0.4_924, 0.0_537, -0.1_995, 0.4_050, 0.0_979, -0.2_721, -0.0_642] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Optional[Any] =DownEncoderBlockaD # noqa F405 lowerCamelCase : Optional[Any] ="""down""" @property def __a ( self ) -> Optional[Any]: return super().get_dummy_input(include_temb=lowerCAmelCase__ ) def __a ( self ) -> List[str]: a : List[Any] = { "in_channels": 32, "out_channels": 32, } a : int = self.dummy_input return init_dict, inputs_dict def __a ( self ) -> Tuple: a : Optional[Any] = [1.1_102, 0.5_302, 0.4_872, -0.0_023, -0.8_042, 0.0_483, -0.3_489, -0.5_632, 0.7_626] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : List[Any] =AttnDownEncoderBlockaD # noqa F405 lowerCamelCase : str ="""down""" @property def __a ( self ) -> Tuple: return super().get_dummy_input(include_temb=lowerCAmelCase__ ) def __a ( self ) -> 
List[str]: a : int = { "in_channels": 32, "out_channels": 32, } a : Optional[int] = self.dummy_input return init_dict, inputs_dict def __a ( self ) -> Optional[Any]: a : Union[str, Any] = [0.8_966, -0.1_486, 0.8_568, 0.8_141, -0.9_046, -0.1_342, -0.0_972, -0.7_417, 0.1_538] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : str =UNetMidBlockaD # noqa F405 lowerCamelCase : Optional[int] ="""mid""" def __a ( self ) -> Dict: a : Optional[Any] = { "in_channels": 32, "temb_channels": 128, } a : Optional[int] = self.dummy_input return init_dict, inputs_dict def __a ( self ) -> Tuple: a : Tuple = [-0.1_062, 1.7_248, 0.3_494, 1.4_569, -0.0_910, -1.2_421, -0.9_984, 0.6_736, 1.0_028] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : List[str] =UNetMidBlockaDCrossAttn # noqa F405 lowerCamelCase : Optional[Any] ="""mid""" def __a ( self ) -> int: a, a : Any = super().prepare_init_args_and_inputs_for_common() a : List[str] = 32 return init_dict, inputs_dict def __a ( self ) -> Optional[Any]: a : Tuple = [0.0_187, 2.4_220, 0.4_484, 1.1_203, -0.6_121, -1.5_122, -0.8_270, 0.7_851, 1.8_335] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Union[str, Any] =UNetMidBlockaDSimpleCrossAttn # noqa F405 lowerCamelCase : Dict ="""mid""" @property def __a ( self ) -> Optional[int]: return super().get_dummy_input(include_encoder_hidden_states=lowerCAmelCase__ ) def __a ( self ) -> Union[str, Any]: a, a : str = super().prepare_init_args_and_inputs_for_common() a : str = 32 return init_dict, inputs_dict def __a ( self ) -> Dict: a : List[Any] = [0.7_143, 1.9_974, 0.5_448, 1.3_977, 0.1_282, -1.1_237, -1.4_238, 0.5_530, 0.8_880] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Optional[Any] =UpBlockaD # noqa F405 lowerCamelCase : Dict ="""up""" @property def __a ( 
self ) -> int: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ ) def __a ( self ) -> List[Any]: a : Tuple = [-0.2_041, -0.4_165, -0.3_022, 0.0_041, -0.6_628, -0.7_053, 0.1_928, -0.0_325, 0.0_523] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Optional[int] =ResnetUpsampleBlockaD # noqa F405 lowerCamelCase : Dict ="""up""" @property def __a ( self ) -> Any: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ ) def __a ( self ) -> int: a : Tuple = [0.2_287, 0.3_549, -0.1_346, 0.4_797, -0.1_715, -0.9_649, 0.7_305, -0.5_864, -0.6_244] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Dict =CrossAttnUpBlockaD # noqa F405 lowerCamelCase : str ="""up""" @property def __a ( self ) -> Tuple: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ ) def __a ( self ) -> Union[str, Any]: a, a : List[str] = super().prepare_init_args_and_inputs_for_common() a : Optional[int] = 32 return init_dict, inputs_dict def __a ( self ) -> Union[str, Any]: a : Tuple = [-0.1_403, -0.3_515, -0.0_420, -0.1_425, 0.3_167, 0.5_094, -0.2_181, 0.5_931, 0.5_582] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : int =SimpleCrossAttnUpBlockaD # noqa F405 lowerCamelCase : Any ="""up""" @property def __a ( self ) -> Tuple: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ , include_encoder_hidden_states=lowerCAmelCase__ ) def __a ( self ) -> Optional[int]: a, a : Any = super().prepare_init_args_and_inputs_for_common() a : Tuple = 32 return init_dict, inputs_dict def __a ( self ) -> Dict: a : List[Any] = [0.2_645, 0.1_480, 0.0_909, 0.8_044, -0.9_758, -0.9_083, 0.0_994, -1.1_453, -0.7_402] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : str =AttnUpBlockaD # 
noqa F405 lowerCamelCase : Any ="""up""" @property def __a ( self ) -> Any: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ ) @unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" ) def __a ( self ) -> Union[str, Any]: a : Union[str, Any] = [0.0_979, 0.1_326, 0.0_021, 0.0_659, 0.2_249, 0.0_059, 0.1_132, 0.5_952, 0.1_033] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : str =SkipUpBlockaD # noqa F405 lowerCamelCase : Dict ="""up""" @property def __a ( self ) -> Any: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ ) def __a ( self ) -> Union[str, Any]: a : Dict = [-0.0_893, -0.1_234, -0.1_506, -0.0_332, 0.0_123, -0.0_211, 0.0_566, 0.0_143, 0.0_362] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : List[Any] =AttnSkipUpBlockaD # noqa F405 lowerCamelCase : Tuple ="""up""" @property def __a ( self ) -> List[Any]: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ ) def __a ( self ) -> Tuple: a : Optional[int] = [0.0_361, 0.0_617, 0.2_787, -0.0_350, 0.0_342, 0.3_421, -0.0_843, 0.0_913, 0.3_015] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Optional[Any] =UpDecoderBlockaD # noqa F405 lowerCamelCase : Optional[int] ="""up""" @property def __a ( self ) -> Optional[Any]: return super().get_dummy_input(include_temb=lowerCAmelCase__ ) def __a ( self ) -> List[str]: a : Union[str, Any] = {"in_channels": 32, "out_channels": 32} a : Dict = self.dummy_input return init_dict, inputs_dict def __a ( self ) -> Dict: a : int = [0.4_404, 0.1_998, -0.9_886, -0.3_320, -0.3_128, -0.7_034, -0.6_955, -0.2_338, -0.3_137] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : int =AttnUpDecoderBlockaD # noqa F405 lowerCamelCase : int ="""up""" @property def 
__a ( self ) -> List[str]: return super().get_dummy_input(include_temb=lowerCAmelCase__ ) def __a ( self ) -> Dict: a : Tuple = {"in_channels": 32, "out_channels": 32} a : List[str] = self.dummy_input return init_dict, inputs_dict def __a ( self ) -> Optional[int]: a : str = [0.6_738, 0.4_491, 0.1_055, 1.0_710, 0.7_316, 0.3_339, 0.3_352, 0.1_023, 0.3_568] super().test_output(lowerCAmelCase__ )
31
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechTaFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch a : List[Any] = random.Random() def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] , _lowercase : int=1.0 , _lowercase : Optional[int]=None , _lowercase : Union[str, Any]=None ) ->Optional[Any]: '''simple docstring''' if rng is None: a : Tuple = global_rng a : Tuple = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch class __UpperCamelCase ( unittest.TestCase ): def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=400 , lowerCAmelCase__=2000 , lowerCAmelCase__=1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=1_6000 , lowerCAmelCase__=True , lowerCAmelCase__=80 , lowerCAmelCase__=16 , lowerCAmelCase__=64 , lowerCAmelCase__="hann_window" , lowerCAmelCase__=80 , lowerCAmelCase__=7600 , lowerCAmelCase__=1E-10 , lowerCAmelCase__=True , ) -> Optional[Any]: a : int = parent a : Tuple = batch_size a : Dict = min_seq_length a : Any = max_seq_length a : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) a : Union[str, Any] = feature_size a : Tuple = padding_value a : str = sampling_rate a : Dict = do_normalize a : str = num_mel_bins a : List[str] = hop_length a : str = win_length a : Optional[Any] = win_function a : List[str] = fmin a : Any = fmax a : Optional[int] = mel_floor a : Tuple = return_attention_mask def __a ( self ) -> Optional[Any]: return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, 
"hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": self.return_attention_mask, } def __a ( self , lowerCAmelCase__=False , lowerCAmelCase__=False ) -> Tuple: def _flatten(lowerCAmelCase__ ): return list(itertools.chain(*lowerCAmelCase__ ) ) if equal_length: a : Union[str, Any] = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size a : str = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: a : Any = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs] return speech_inputs def __a ( self , lowerCAmelCase__=False , lowerCAmelCase__=False ) -> Dict: if equal_length: a : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size a : Any = [ floats_list((x, self.num_mel_bins) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: a : Optional[int] = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs] return speech_inputs @require_torch class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Tuple =SpeechTaFeatureExtractor def __a ( self ) -> Union[str, Any]: a : Tuple = SpeechTaFeatureExtractionTester(self ) def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]: self.assertTrue(np.all(np.mean(lowerCAmelCase__ , axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase__ , axis=0 ) - 1 ) < 1E-3 ) ) def __a ( self ) -> Union[str, Any]: # Tests that all call wrap to encode_plus and batch_encode_plus a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 a : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] a : Any = 
[np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs] # Test not batched input a : Optional[int] = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values a : Optional[Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) ) # Test batched a : int = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values a : int = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ): self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) ) def __a ( self ) -> Optional[Any]: a : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) a : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] a : int = ["longest", "max_length", "do_not_pad"] a : Tuple = [None, 1600, None] for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ): a : Dict = feat_extract(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors="np" ) a : List[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def __a ( self ) -> str: a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) a : List[str] = range(800 , 1400 , 200 ) a : List[str] = [floats_list((1, x) )[0] for x in lengths] a : Any = ["longest", "max_length", "do_not_pad"] a : Any = [None, 1600, None] for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ): a : List[Any] = feat_extract(lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding=lowerCAmelCase__ ) a : Dict = processed.input_values 
self._check_zero_mean_unit_variance(input_values[0][:800] ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def __a ( self ) -> Dict: a : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) a : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] a : Union[str, Any] = feat_extract( lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding="max_length" , return_tensors="np" ) a : List[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def __a ( self ) -> Dict: a : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) a : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] a : List[Any] = feat_extract( lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding="longest" , return_tensors="np" ) a : Union[str, Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) a : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] a : int = feat_extract( lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=2000 , padding="longest" , return_tensors="np" ) a : Dict = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200) ) def __a ( self ) -> List[str]: a : Tuple = 
self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) a : Any = np.random.rand(100 ).astype(np.floataa ) a : Optional[Any] = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: a : str = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) a : List[str] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def __a ( self ) -> Tuple: # Tests that all call wrap to encode_plus and batch_encode_plus a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 a : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] a : Tuple = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs] # Test feature size a : Union[str, Any] = feature_extractor(audio_target=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" ).input_values self.assertTrue(input_values.ndim == 3 ) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins ) # Test not batched input a : Dict = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values a : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) ) # Test batched a : Optional[int] = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values a : Any = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ): self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
a : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)] a : List[Any] = np.asarray(lowerCAmelCase__ ) a : str = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values a : str = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ): self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) ) def __a ( self ) -> str: a : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target() a : Any = self.feature_extraction_class(**self.feat_extract_dict ) a : Union[str, Any] = feat_extract.model_input_names[0] a : List[str] = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ) for x, y in zip(lowerCAmelCase__ , processed_features[input_name] ) ) ) a : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ ) a : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="np" ) a : Tuple = processed_features[input_name] if len(batch_features_input.shape ) < 3: a : Dict = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def __a ( self ) -> Tuple: a : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ ) a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) a : Optional[int] = feat_extract.model_input_names[0] a : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="pt" ) a : Tuple = processed_features[input_name] if len(batch_features_input.shape ) < 3: a : List[str] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def __a ( self ) -> Optional[Any]: a : Dict = 
self.feature_extraction_class(**self.feat_extract_dict ) a : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target() a : Optional[Any] = feat_extract.model_input_names[0] a : List[str] = BatchFeature({input_name: speech_inputs} ) a : Tuple = feat_extract.num_mel_bins # hack! a : List[Any] = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="np" )[input_name] a : Any = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="pt" )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def __a ( self ) -> Union[str, Any]: a : Any = self.feat_extract_dict a : Optional[Any] = True a : Union[str, Any] = self.feature_extraction_class(**lowerCAmelCase__ ) a : Any = self.feat_extract_tester.prepare_inputs_for_target() a : Dict = [len(lowerCAmelCase__ ) for x in speech_inputs] a : int = feat_extract.model_input_names[0] a : List[Any] = BatchFeature({input_name: speech_inputs} ) a : Union[str, Any] = feat_extract.num_mel_bins # hack! a : Dict = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="np" ) self.assertIn("attention_mask" , lowerCAmelCase__ ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCAmelCase__ ) def __a ( self ) -> Union[str, Any]: a : Tuple = self.feat_extract_dict a : str = True a : Optional[Any] = self.feature_extraction_class(**lowerCAmelCase__ ) a : List[Any] = self.feat_extract_tester.prepare_inputs_for_target() a : Dict = [len(lowerCAmelCase__ ) for x in speech_inputs] a : Optional[Any] = feat_extract.model_input_names[0] a : str = BatchFeature({input_name: speech_inputs} ) a : Optional[Any] = min(lowerCAmelCase__ ) a : List[Any] = feat_extract.num_mel_bins # hack! 
a : Any = feat_extract.pad( lowerCAmelCase__ , padding="max_length" , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="np" ) self.assertIn("attention_mask" , lowerCAmelCase__ ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] ) def __a ( self , lowerCAmelCase__ ) -> Optional[int]: from datasets import load_dataset a : Tuple = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech a : Optional[Any] = ds.sort("id" ).select(range(lowerCAmelCase__ ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def __a ( self ) -> Union[str, Any]: # fmt: off a : List[Any] = torch.tensor( [2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03, 3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03, 2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04, 4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03, 7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04, 4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] ) # fmt: on a : List[str] = self._load_datasamples(1 ) a : Union[str, Any] = SpeechTaFeatureExtractor() a : str = feature_extractor(lowerCAmelCase__ , return_tensors="pt" ).input_values self.assertEquals(input_values.shape , (1, 9_3680) ) self.assertTrue(torch.allclose(input_values[0, :30] , lowerCAmelCase__ , atol=1E-6 ) ) def __a ( self ) -> Union[str, Any]: # fmt: off a : Tuple = torch.tensor( [-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777, -3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386, -3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571, -3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] ) # fmt: on a : 
Dict = self._load_datasamples(1 ) a : Tuple = SpeechTaFeatureExtractor() a : Optional[int] = feature_extractor(audio_target=lowerCAmelCase__ , return_tensors="pt" ).input_values self.assertEquals(input_values.shape , (1, 366, 80) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase__ , atol=1E-4 ) )
31
1
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : int ) ->str: '''simple docstring''' a : list[list[str]] = [[] for _ in range(_lowercase )] a : Dict = key - 1 if key <= 0: raise ValueError("Height of grid can't be 0 or negative" ) if key == 1 or len(_lowercase ) <= key: return input_string for position, character in enumerate(_lowercase ): a : str = position % (lowest * 2) # puts it in bounds a : Optional[Any] = min(_lowercase , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append(_lowercase ) a : Optional[int] = ["".join(_lowercase ) for row in temp_grid] a : str = "".join(_lowercase ) return output_string def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : int ) ->str: '''simple docstring''' a : str = [] a : Dict = key - 1 if key <= 0: raise ValueError("Height of grid can't be 0 or negative" ) if key == 1: return input_string a : list[list[str]] = [[] for _ in range(_lowercase )] # generates template for position in range(len(_lowercase ) ): a : Any = position % (lowest * 2) # puts it in bounds a : Any = min(_lowercase , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append("*" ) a : List[str] = 0 for row in temp_grid: # fills in the characters a : Union[str, Any] = input_string[counter : counter + len(_lowercase )] grid.append(list(_lowercase ) ) counter += len(_lowercase ) a : Union[str, Any] = "" # reads as zigzag for position in range(len(_lowercase ) ): a : Tuple = position % (lowest * 2) # puts it in bounds a : str = min(_lowercase , lowest * 2 - num ) # creates zigzag pattern output_string += grid[num][0] grid[num].pop(0 ) return output_string def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->dict[int, str]: '''simple docstring''' a : Optional[Any] = {} for key_guess in range(1 , len(_lowercase ) ): # tries every key a : int = decrypt(_lowercase , _lowercase ) return results if __name__ == "__main__": import doctest doctest.testmod()
31
"""simple docstring"""
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    """Tokenize one dataset row and record its characters-per-token ratio.

    NOTE(review): the original bound everything to one local ``a`` (so
    ``output`` was undefined at use) and named this function
    ``_SCREAMING_SNAKE_CASE`` while ``ds.map`` referenced ``tokenize``;
    names restored so the map call resolves.
    """
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    # Chars per token: a rough proxy for how compressible the sample is.
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
31
1
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf

    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments


@require_tf
class __UpperCamelCase(unittest.TestCase):
    """Smoke tests for TensorFlowBenchmark over tiny hub models.

    NOTE(review): every method in the original was named ``__a`` (each
    definition shadowed the previous) and all locals were rebound to ``a``
    while call sites referenced the real identifiers (``MODEL_ID``,
    ``benchmark``, ``results``, ``self.check_results_dict_not_empty``),
    which raised NameError. Identifiers restored; the boolean flag values
    were obfuscated away and follow the upstream test suite — confirm.
    """

    def check_results_dict_not_empty(self, results):
        # Every (batch_size, sequence_length) cell for every model must hold a value.
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            use_xla=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            # The line-by-line memory tracer exposes these four summary views.
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
31
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

# Re-exported below through _LazyModule's extra_objects under "MT5Tokenizer".
# NOTE(review): the original bound this alias (and _import_structure) to a
# throwaway local ``a`` while the call sites referenced the real names,
# raising NameError at import time; names restored.
MTaTokenizer = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

MTaTokenizerFast = TaTokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel

else:
    import sys

    # Install the lazy module so attribute access triggers on-demand imports.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
        module_spec=__spec__,
    )
31
1
"""simple docstring"""
import os
import time

import numpy as np
import onnxruntime as ort


# NOTE(review): the original bound these three strings to a throwaway local;
# upstream they configure TensorRT int8 execution via environment variables —
# confirm the exact keys against the original script.
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"

sess_opt = ort.SessionOptions()
# Disable graph optimizations so TensorRT sees the raw graph.
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )

print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
31
"""simple docstring"""


def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return *min_val* when *option* is truthy, else *max_val*.

    NOTE(review): the original named every function ``_SCREAMING_SNAKE_CASE``,
    so earlier definitions were shadowed and the calls to ``get_avg``,
    ``guess_the_number`` and ``main`` raised NameError; names restored.
    """
    assert (
        isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the truncated integer midpoint of the two numbers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Binary-search *to_guess* inside (lower, higher), printing each step.

    Raises ValueError when the bounds are inverted or *to_guess* is outside
    the open interval.
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        # Oracle comparing the current midpoint with the target.
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    """Interactive entry point: read bounds and target, then run the search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
31
1
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    """Configuration class for GPT-J models.

    NOTE(review): in the original both classes were named ``__UpperCamelCase``
    (the second shadowed the first) and ``__init__`` rebound a local ``a``
    instead of setting ``self.<attr>``, so no configuration attribute was
    ever stored; names and attribute assignments restored.
    """

    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for GPT-J (optionally with past key values)."""

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            # Extend the mask to cover the dummy past positions.
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
31
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


logger = logging.get_logger(__name__)

# Map from ONNX tensor type strings to the corresponding numpy dtypes.
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    """Thin wrapper around an onnxruntime InferenceSession with hub support.

    NOTE(review): in the original, ``__init__`` (and every method) rebound a
    throwaway local ``a`` while use sites referenced ``self.model``,
    ``self.model_save_dir``, ``inputs``, ``src_path`` etc., raising
    NameError/AttributeError; the real identifiers are restored. The default
    for ``latest_model_name`` was obfuscated away and follows upstream
    (``ONNX_WEIGHTS_NAME``) — confirm.
    """

    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        # onnxruntime expects plain numpy arrays as feed values.
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Create an InferenceSession, defaulting to the CPU provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        """Copy the latest model file (and external weights, if any) into *save_directory*."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        """Public save entry point: validates the target directory then delegates."""
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        """Load a session from a local directory or download it from the hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)

        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        """Hub-style loader; supports the ``repo@revision`` id syntax."""
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
31
1
"""simple docstring""" from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar a : Tuple = TypeVar('''KEY''') a : List[str] = TypeVar('''VAL''') @dataclass(frozen=a__ , slots=a__ ) class __UpperCamelCase ( Generic[KEY, VAL] ): lowerCamelCase : KEY lowerCamelCase : VAL class __UpperCamelCase ( _Item ): def __init__( self ) -> None: super().__init__(lowerCAmelCase__ , lowerCAmelCase__ ) def __bool__( self ) -> bool: return False a : Any = _DeletedItem() class __UpperCamelCase ( MutableMapping[KEY, VAL] ): def __init__( self , lowerCAmelCase__ = 8 , lowerCAmelCase__ = 0.75 ) -> None: a : Union[str, Any] = initial_block_size a : list[_Item | None] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 a : Union[str, Any] = capacity_factor a : Union[str, Any] = 0 def __a ( self , lowerCAmelCase__ ) -> int: return hash(lowerCAmelCase__ ) % len(self._buckets ) def __a ( self , lowerCAmelCase__ ) -> int: return (ind + 1) % len(self._buckets ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> bool: a : List[Any] = self._buckets[ind] if not stored: a : Union[str, Any] = _Item(lowerCAmelCase__ , lowerCAmelCase__ ) self._len += 1 return True elif stored.key == key: a : int = _Item(lowerCAmelCase__ , lowerCAmelCase__ ) return True else: return False def __a ( self ) -> bool: a : Tuple = len(self._buckets ) * self._capacity_factor return len(self ) >= int(lowerCAmelCase__ ) def __a ( self ) -> bool: if len(self._buckets ) <= self._initial_block_size: return False a : Dict = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def __a ( self , lowerCAmelCase__ ) -> None: a : List[str] = self._buckets a : List[Any] = [None] * new_size a : int = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def __a ( self ) -> None: self._resize(len(self._buckets ) * 2 ) def __a ( self ) -> None: self._resize(len(self._buckets ) // 2 ) def __a ( 
self , lowerCAmelCase__ ) -> Iterator[int]: a : List[str] = self._get_bucket_index(lowerCAmelCase__ ) for _ in range(len(self._buckets ) ): yield ind a : Union[str, Any] = self._get_next_ind(lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None: for ind in self._iterate_buckets(lowerCAmelCase__ ): if self._try_set(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): break def __setitem__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None: if self._is_full(): self._size_up() self._add_item(lowerCAmelCase__ , lowerCAmelCase__ ) def __delitem__( self , lowerCAmelCase__ ) -> None: for ind in self._iterate_buckets(lowerCAmelCase__ ): a : Tuple = self._buckets[ind] if item is None: raise KeyError(lowerCAmelCase__ ) if item is _deleted: continue if item.key == key: a : List[Any] = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , lowerCAmelCase__ ) -> VAL: for ind in self._iterate_buckets(lowerCAmelCase__ ): a : Tuple = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(lowerCAmelCase__ ) def __len__( self ) -> int: return self._len def __iter__( self ) -> Iterator[KEY]: yield from (item.key for item in self._buckets if item) def __repr__( self ) -> str: a : Optional[Any] = " ,".join( f"""{item.key}: {item.val}""" for item in self._buckets if item ) return f"""HashMap({val_string})"""
31
"""Download "real" regularization class images from the LAION knn service.

Used for DreamBooth-style prior preservation: fetches up to `num_class_images`
images matching `class_prompt` into `{class_data_dir}/images`, recording
captions, URLs and local paths alongside.
"""
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    """Query the LAION knn service for `class_prompt` and download the results.

    Args:
        class_prompt: text prompt used to retrieve images.
        class_data_dir: output directory; images land in `{class_data_dir}/images`.
        num_class_images: number of images to download.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)  # over-request to survive dead links
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    # Already have enough images from a previous run -> nothing to do.
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the request size until the service returns enough candidates
    # (capped at 10k to avoid hammering the endpoint).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Validate that the payload decodes as an image before keeping it.
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip any unreachable/broken candidate.
                continue
    return


def parse_args():
    """Parse the command-line arguments for the retrieval script."""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
31
1
"""Configuration class for the Nat (Neighborhood Attention Transformer) model."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    """Stores the configuration of a Nat model.

    Defaults match the shi-labs/nat-mini-in1k-224 architecture.
    """

    model_type = "nat"

    # Map generic transformer attribute names onto Nat-specific ones.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
31
"""`accelerate tpu-config` CLI: run setup commands across TPU VM workers."""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess

from packaging.version import Version, parse

from accelerate.commands.config.config_args import default_config_file, load_config_from_file


_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    """Build the argument parser for `accelerate tpu-config`.

    When `subparsers` is given (invocation through the main `accelerate` CLI),
    registers a `tpu-config` subcommand on it; otherwise builds a standalone parser.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    """Assemble and run the `gcloud ... tpu-vm ssh` command from parsed CLI args."""
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone

    # Resolve the accelerate install spec: git source, latest pypi, or a pinned version.
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    """Standalone entry point: parse args and launch."""
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
31
1
"""Tests for the SpeechT5 feature extractor (waveform inputs and mel-spectrogram targets)."""
import itertools
import random
import unittest

import numpy as np

from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch


global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random 2-D nested list of floats with the given (rows, cols) shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    """Holds the hyper-parameters used to build extractor configs and dummy inputs."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # step between consecutive input lengths so the batch spans min..max
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        """Return the kwargs dict used to instantiate a SpeechT5FeatureExtractor."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Dummy waveform inputs: equal-length or growing-length sequences."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        """Dummy mel-spectrogram targets: equal-length or growing-length sequences."""
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        # Normalized inputs should have ~0 mean and ~1 variance per feature.
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            # NOTE(review): upstream checks row 0 here rather than row 1 — kept as-is.
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float64).sum() - input_pt.numpy().astype(np.float64).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
31
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: a : Tuple = None a : int = logging.get_logger(__name__) a : int = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} a : Optional[int] = { '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json''' ), }, } a : int = { '''facebook/nllb-large-en-ro''': 1024, '''facebook/nllb-200-distilled-600M''': 1024, } # fmt: off a : List[Any] = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', 
'''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', 
'''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']


# Fast (Rust-backed) NLLB tokenizer wrapper.
# NOTE(review): identifiers in this block look machine-garbled — locals are
# assigned to `a` but referenced later under their original names (e.g.
# `vocab_file`, `_additional_special_tokens`), all `__init__` parameters share
# the name `lowerCAmelCase__`, and every method is named `__a`. As written the
# code cannot run; it is preserved byte-for-byte here, documentation only.
class __UpperCamelCase ( a__ ):
    # Class-level tokenizer metadata (file names, pretrained maps, slow class).
    lowerCamelCase : Optional[Any] =VOCAB_FILES_NAMES
    lowerCamelCase : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase : Dict =PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase : List[Any] =["""input_ids""", """attention_mask"""]
    lowerCamelCase : Union[str, Any] =NllbTokenizer
    # Token-id lists prepended/appended to every encoded sequence.
    lowerCamelCase : List[int] =[]
    lowerCamelCase : List[int] =[]

    def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Optional[Any]:
        # Mask token behave like a normal word, i.e. include the space before it
        a : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
        a : Optional[Any] = legacy_behaviour
        super().__init__(
            vocab_file=lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , legacy_behaviour=lowerCAmelCase__ , **lowerCAmelCase__ , )
        a : int = vocab_file
        a : Any = False if not self.vocab_file else True
        # Register every NLLB language code as a special token (no duplicates).
        a : List[str] = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
        # Map each language code to its token id for fast lookup.
        a : str = {
            lang_code: self.convert_tokens_to_ids(lowerCAmelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        a : List[Any] = src_lang if src_lang is not None else "eng_Latn"
        a : str = self.convert_tokens_to_ids(self._src_lang )
        a : Any = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    @property
    def __a ( self ) -> str:
        # Current source-language code.
        return self._src_lang

    @src_lang.setter
    def __a ( self , lowerCAmelCase__ ) -> None:
        # Setting the source language refreshes the special-token template.
        a : List[str] = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
        # build_inputs_with_special_tokens: wrap ids with prefix/suffix tokens.
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
        # create_token_type_ids_from_sequences: NLLB uses all-zero segment ids.
        a : str = [self.sep_token_id]
        a : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
        # _build_translation_inputs: encode text and attach forced BOS target id.
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        a : Dict = src_lang
        a : int = self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
        a : Dict = self.convert_tokens_to_ids(lowerCAmelCase__ )
        a : Any = tgt_lang_id
        return inputs

    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = "eng_Latn" , lowerCAmelCase__ = None , lowerCAmelCase__ = "fra_Latn" , **lowerCAmelCase__ , ) -> BatchEncoding:
        # prepare_seq2seq_batch: record languages then defer to the base class.
        a : Optional[int] = src_lang
        a : int = tgt_lang
        return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )

    def __a ( self ) -> Tuple:
        # _switch_to_input_mode
        return self.set_src_lang_special_tokens(self.src_lang )

    def __a ( self ) -> str:
        # _switch_to_target_mode
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def __a ( self , lowerCAmelCase__ ) -> None:
        # set_src_lang_special_tokens: legacy mode appends [eos, lang]; new mode
        # prefixes [lang] and suffixes [eos]; the post-processor is rebuilt.
        a : int = self.convert_tokens_to_ids(lowerCAmelCase__ )
        if self.legacy_behaviour:
            a : Tuple = []
            a : List[str] = [self.eos_token_id, self.cur_lang_code]
        else:
            a : int = [self.cur_lang_code]
            a : int = [self.eos_token_id]
        a : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
        a : Any = self.convert_ids_to_tokens(self.suffix_tokens )
        a : Any = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )

    def __a ( self , lowerCAmelCase__ ) -> None:
        # set_tgt_lang_special_tokens: mirror of the source-language setter.
        a : str = self.convert_tokens_to_ids(lowerCAmelCase__ )
        if self.legacy_behaviour:
            a : Optional[Any] = []
            a : int = [self.eos_token_id, self.cur_lang_code]
        else:
            a : List[Any] = [self.cur_lang_code]
            a : List[Any] = [self.eos_token_id]
        a : int = self.convert_ids_to_tokens(self.prefix_tokens )
        a : int = self.convert_ids_to_tokens(self.suffix_tokens )
        a : Any = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )

    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
        # save_vocabulary: copy the sentencepiece model file next to the config.
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(lowerCAmelCase__ ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        a : Any = os.path.join(
            lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
            copyfile(self.vocab_file , lowerCAmelCase__ )
        return (out_vocab_file,)
31
1
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : Any ) ->Tuple: '''simple docstring''' a : Optional[int] = len(_lowercase ) for i in range(length - 1 ): a : Tuple = i for k in range(i + 1 , _lowercase ): if collection[k] < collection[least]: a : Optional[Any] = k if least != i: a, a : List[str] = (collection[i], collection[least]) return collection if __name__ == "__main__": a : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip() a : Optional[Any] = [int(item) for item in user_input.split(''',''')] print(selection_sort(unsorted))
31
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class __UpperCamelCase ( a__ ): lowerCamelCase : torch.FloatTensor lowerCamelCase : torch.FloatTensor lowerCamelCase : Optional[torch.FloatTensor] =None class __UpperCamelCase ( a__ , a__ ): lowerCamelCase : Tuple =2 @register_to_config def __init__( self , lowerCAmelCase__ = 0.02 , lowerCAmelCase__ = 100 , lowerCAmelCase__ = 1.007 , lowerCAmelCase__ = 80 , lowerCAmelCase__ = 0.05 , lowerCAmelCase__ = 50 , ) -> Union[str, Any]: # standard deviation of the initial noise distribution a : Tuple = sigma_max # setable values a : int = None a : np.IntTensor = None a : torch.FloatTensor = None # sigma(t_i) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> torch.FloatTensor: return sample def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[str]: a : List[Any] = num_inference_steps a : List[str] = np.arange(0 , self.num_inference_steps )[::-1].copy() a : int = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ ) a : List[str] = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] a : Any = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa , device=lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[torch.FloatTensor, float]: if self.config.s_min <= sigma <= self.config.s_max: a : str = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: a : Dict = 0 # sample eps ~ N(0, S_noise^2 * I) a : Union[str, Any] = self.config.s_noise * randn_tensor(sample.shape , generator=lowerCAmelCase__ ).to(sample.device ) a : Any = sigma + gamma * sigma a : Tuple = sample + ((sigma_hat**2 - 
sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = True , ) -> Union[KarrasVeOutput, Tuple]: a : Union[str, Any] = sample_hat + sigma_hat * model_output a : Tuple = (sample_hat - pred_original_sample) / sigma_hat a : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=lowerCAmelCase__ , derivative=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = True , ) -> Union[KarrasVeOutput, Tuple]: a : Optional[int] = sample_prev + sigma_prev * model_output a : str = (sample_prev - pred_original_sample) / sigma_prev a : Dict = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=lowerCAmelCase__ , derivative=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: raise NotImplementedError()
31
1
"""simple docstring""" import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class __UpperCamelCase ( a__ ): @require_torch def __a ( self ) -> Any: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched a : Optional[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n " a : Optional[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n " a : Union[str, Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n " # Force fetching the files so that we can use the cache a : int = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(lowerCAmelCase__ ) BertModel.from_pretrained(lowerCAmelCase__ ) BertTokenizer.from_pretrained(lowerCAmelCase__ ) pipeline(task="fill-mask" , model=lowerCAmelCase__ ) # baseline - just load from_pretrained with normal network a : List[str] = [sys.executable, "-c", "\n".join([load, run, mock] )] # should succeed a : Optional[Any] = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a : List[str] = "1" a : Dict = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def __a ( self ) -> int: # python one-liner segments # this must be loaded 
before socket.socket is monkey-patched a : Optional[int] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n " a : List[str] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n " a : Dict = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n " # Force fetching the files so that we can use the cache a : List[Any] = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(lowerCAmelCase__ ) BertModel.from_pretrained(lowerCAmelCase__ ) BertTokenizer.from_pretrained(lowerCAmelCase__ ) pipeline(task="fill-mask" , model=lowerCAmelCase__ ) # baseline - just load from_pretrained with normal network a : Optional[int] = [sys.executable, "-c", "\n".join([load, run, mock] )] # should succeed a : Any = self.get_env() a : Optional[Any] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def __a ( self ) -> Any: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched a : Any = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n " a : Optional[int] = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n " a : Optional[int] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n 
" # baseline - just load from_pretrained with normal network a : Tuple = [sys.executable, "-c", "\n".join([load, run] )] # should succeed a : int = self.get_env() a : Optional[Any] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) # next emulate no network a : int = [sys.executable, "-c", "\n".join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a : Tuple = "1" a : Optional[int] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def __a ( self ) -> Optional[Any]: a : str = "\nfrom transformers import pipeline\n " a : str = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n " a : Union[str, Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n " a : Any = self.get_env() a : int = "1" a : Optional[int] = [sys.executable, "-c", "\n".join([load, mock, run] )] a : Optional[int] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , ) @require_torch def __a ( self ) -> List[str]: a : str = "\nfrom transformers import AutoModel\n " a : 
Union[str, Any] = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n " # baseline - just load from_pretrained with normal network a : str = [sys.executable, "-c", "\n".join([load, run] )] # should succeed a : Tuple = self.get_env() a : str = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a : Dict = "1" a : int = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() )
31
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal a : Optional[Any] = datasets.utils.logging.get_logger(__name__) a : Union[str, Any] = ['''names''', '''prefix'''] a : Any = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] a : Any = ['''encoding_errors''', '''on_bad_lines'''] a : List[str] = ['''date_format'''] @dataclass class __UpperCamelCase ( datasets.BuilderConfig ): lowerCamelCase : str ="," lowerCamelCase : Optional[str] =None lowerCamelCase : Optional[Union[int, List[int], str]] ="infer" lowerCamelCase : Optional[List[str]] =None lowerCamelCase : Optional[List[str]] =None lowerCamelCase : Optional[Union[int, str, List[int], List[str]]] =None lowerCamelCase : Optional[Union[List[int], List[str]]] =None lowerCamelCase : Optional[str] =None lowerCamelCase : bool =True lowerCamelCase : Optional[Literal["c", "python", "pyarrow"]] =None lowerCamelCase : Dict[Union[int, str], Callable[[Any], Any]] =None lowerCamelCase : Optional[list] =None lowerCamelCase : Optional[list] =None lowerCamelCase : bool =False lowerCamelCase : Optional[Union[int, List[int]]] =None lowerCamelCase : Optional[int] =None lowerCamelCase : Optional[Union[str, List[str]]] =None lowerCamelCase : bool =True lowerCamelCase : bool =True lowerCamelCase : bool =False lowerCamelCase : bool =True lowerCamelCase : Optional[str] =None lowerCamelCase : str ="." 
lowerCamelCase : Optional[str] =None lowerCamelCase : str ='"' lowerCamelCase : int =0 lowerCamelCase : Optional[str] =None lowerCamelCase : Optional[str] =None lowerCamelCase : Optional[str] =None lowerCamelCase : Optional[str] =None lowerCamelCase : bool =True lowerCamelCase : bool =True lowerCamelCase : int =0 lowerCamelCase : bool =True lowerCamelCase : bool =False lowerCamelCase : Optional[str] =None lowerCamelCase : int =1_0000 lowerCamelCase : Optional[datasets.Features] =None lowerCamelCase : Optional[str] ="strict" lowerCamelCase : Literal["error", "warn", "skip"] ="error" lowerCamelCase : Optional[str] =None def __a ( self ) -> Dict: if self.delimiter is not None: a : int = self.delimiter if self.column_names is not None: a : Any = self.column_names @property def __a ( self ) -> List[str]: a : Dict = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # 
some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class __UpperCamelCase ( datasets.ArrowBasedBuilder ): lowerCamelCase : Union[str, Any] =CsvConfig def __a ( self ) -> Optional[Any]: return datasets.DatasetInfo(features=self.config.features ) def __a ( self , lowerCAmelCase__ ) -> Optional[int]: if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) a : Optional[Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(lowerCAmelCase__ , (str, list, tuple) ): a : Tuple = data_files if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): a : Tuple = [files] a : int = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] a : int = [] for split_name, files in data_files.items(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): a : Any = [files] a : List[str] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) ) return splits def __a ( self , lowerCAmelCase__ ) -> 
pa.Table: if self.config.features is not None: a : Optional[Any] = self.config.features.arrow_schema if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ): # cheaper cast a : Dict = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example a : Union[str, Any] = table_cast(lowerCAmelCase__ , lowerCAmelCase__ ) return pa_table def __a ( self , lowerCAmelCase__ ) -> Any: a : Tuple = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str a : Any = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ): a : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(lowerCAmelCase__ ): a : Any = pa.Table.from_pandas(lowerCAmelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ ) except ValueError as e: logger.error(f"""Failed to read file '{file}' with error {type(lowerCAmelCase__ )}: {e}""" ) raise
31
1
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : list , _lowercase : list ) ->float: '''simple docstring''' _validate_point(_lowercase ) _validate_point(_lowercase ) if len(_lowercase ) != len(_lowercase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(a - b ) for a, b in zip(_lowercase , _lowercase ) ) ) def _SCREAMING_SNAKE_CASE ( _lowercase : list[float] ) ->None: '''simple docstring''' if point: if isinstance(_lowercase , _lowercase ): for item in point: if not isinstance(_lowercase , (int, float) ): a : Tuple = ( "Expected a list of numbers as input, found " F"""{type(_lowercase ).__name__}""" ) raise TypeError(_lowercase ) else: a : Any = F"""Expected a list of numbers as input, found {type(_lowercase ).__name__}""" raise TypeError(_lowercase ) else: raise ValueError("Missing an input" ) def _SCREAMING_SNAKE_CASE ( _lowercase : list , _lowercase : list ) ->float: '''simple docstring''' _validate_point(_lowercase ) _validate_point(_lowercase ) if len(_lowercase ) != len(_lowercase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(x - y ) for x, y in zip(_lowercase , _lowercase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
31
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class __UpperCamelCase ( a__ , a__ , unittest.TestCase ): lowerCamelCase : Dict =IFPipeline lowerCamelCase : int =TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""} lowerCamelCase : int =TEXT_TO_IMAGE_BATCH_PARAMS lowerCamelCase : int =PipelineTesterMixin.required_optional_params - {"""latents"""} def __a ( self ) -> List[str]: return self._get_dummy_components() def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Dict: if str(lowerCAmelCase__ ).startswith("mps" ): a : Tuple = torch.manual_seed(lowerCAmelCase__ ) else: a : int = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) a : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def __a ( self ) -> Union[str, Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def __a ( self ) -> Any: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __a ( self ) -> Union[str, Any]: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __a ( self ) -> Optional[int]: 
self._test_save_load_local() def __a ( self ) -> Tuple: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __a ( self ) -> str: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def __a ( self ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self ) -> Tuple: # if a : Tuple = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa ) a : str = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("cuda" ) a, a : List[str] = pipe_a.encode_prompt("anime turtle" , device="cuda" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() a : Optional[int] = None a : Optional[int] = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img a : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components ) a : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting a 
: Union[str, Any] = IFInpaintingPipeline(**pipe_a.components ) a : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: # pipeline 1 _start_torch_memory_measurement() a : List[str] = torch.Generator(device="cpu" ).manual_seed(0 ) a : Dict = pipe_a( prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (64, 64, 3) a : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 a : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) # pipeline 2 _start_torch_memory_measurement() a : List[str] = torch.Generator(device="cpu" ).manual_seed(0 ) a : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : Union[str, Any] = pipe_a( prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (256, 256, 3) a : int = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 a : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , 
lowerCAmelCase__ ) -> int: # pipeline 1 _start_torch_memory_measurement() a : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : Tuple = torch.Generator(device="cpu" ).manual_seed(0 ) a : List[Any] = pipe_a( prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , ) a : Tuple = output.images[0] assert image.shape == (64, 64, 3) a : int = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 a : Optional[int] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) # pipeline 2 _start_torch_memory_measurement() a : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) a : List[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : Dict = pipe_a( prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , original_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , ) a : int = output.images[0] assert image.shape == (256, 256, 3) a : str = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 a : Any = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: # pipeline 1 _start_torch_memory_measurement() a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowerCAmelCase__ ) a : 
Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) a : List[str] = pipe_a( prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , ) a : List[Any] = output.images[0] assert image.shape == (64, 64, 3) a : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 a : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) # pipeline 2 _start_torch_memory_measurement() a : str = torch.Generator(device="cpu" ).manual_seed(0 ) a : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) a : Dict = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(lowerCAmelCase__ ) a : Optional[int] = pipe_a( prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , original_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (256, 256, 3) a : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 a : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( ) ->List[str]: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
31
1
"""simple docstring""" from __future__ import annotations from math import ceil, floor, sqrt def _SCREAMING_SNAKE_CASE ( _lowercase : int = 200_0000 ) ->int: '''simple docstring''' a : list[int] = [0] a : int for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target a : int = 0 # the area corresponding to the grid that gives the product closest to target a : int = 0 # an estimate of b, using the quadratic formula a : float # the largest integer less than b_estimate a : int # the largest integer less than b_estimate a : int # the triangle number corresponding to b_floor a : int # the triangle number corresponding to b_ceil a : int for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): a : List[Any] = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 a : Any = floor(_lowercase ) a : Optional[Any] = ceil(_lowercase ) a : Union[str, Any] = triangle_numbers[b_floor] a : Optional[int] = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): a : List[str] = triangle_b_first_guess * triangle_a a : Any = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): a : Any = triangle_b_second_guess * triangle_a a : int = idx_a * b_ceil return area if __name__ == "__main__": print(F'''{solution() = }''')
31
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __UpperCamelCase ( unittest.TestCase ): def __a ( self ) -> Optional[Any]: a : Optional[int] = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) ) def __a ( self ) -> Optional[Any]: a : str = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) ) def __a ( self ) -> Dict: a : List[str] = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) ) def __a ( self ) -> List[Any]: a : Optional[Any] = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) ) def __a ( self ) -> Tuple: a : Tuple = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) ) def __a ( self ) -> Dict: a : Dict = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", 
"text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] a : Dict = "fp16" self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def __a ( self ) -> List[str]: a : List[Any] = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] a : Any = "fp16" self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def __a ( self ) -> int: # pass variant but use the non-variant filenames a : int = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] a : Tuple = "fp16" self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def __a ( self ) -> str: a : str = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] a : Any = "fp16" self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def __a ( self ) -> str: a : Union[str, Any] = [ "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", ] a : str = "fp16" self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def __a ( self ) -> List[str]: # pass variant but use the non-variant filenames a : Optional[int] = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] a : str = "fp16" self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def __a ( self ) -> Optional[Any]: a : Any = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", 
"vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", # 'text_encoder/model.fp16.safetensors', "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] a : Optional[int] = "fp16" self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
31
1
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : float , _lowercase : float ) ->float: '''simple docstring''' return round(float(moles / volume ) * nfactor ) def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float , _lowercase : float ) ->float: '''simple docstring''' return round(float((moles * 0.0821 * temperature) / (volume) ) ) def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float , _lowercase : float ) ->float: '''simple docstring''' return round(float((moles * 0.0821 * temperature) / (pressure) ) ) def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float , _lowercase : float ) ->float: '''simple docstring''' return round(float((pressure * volume) / (0.0821 * moles) ) ) if __name__ == "__main__": import doctest doctest.testmod()
31
"""simple docstring""" import flax.linen as nn import jax import jax.numpy as jnp class __UpperCamelCase ( nn.Module ): lowerCamelCase : int lowerCamelCase : jnp.dtype =jnp.floataa def __a ( self ) -> Tuple: a : str = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , lowerCAmelCase__ ) -> Optional[Any]: a, a, a, a : List[str] = hidden_states.shape a : List[Any] = jax.image.resize( lowerCAmelCase__ , shape=(batch, height * 2, width * 2, channels) , method="nearest" , ) a : List[str] = self.conv(lowerCAmelCase__ ) return hidden_states class __UpperCamelCase ( nn.Module ): lowerCamelCase : int lowerCamelCase : jnp.dtype =jnp.floataa def __a ( self ) -> Dict: a : Optional[Any] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , lowerCAmelCase__ ) -> Tuple: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) a : Tuple = self.conv(lowerCAmelCase__ ) return hidden_states class __UpperCamelCase ( nn.Module ): lowerCamelCase : int lowerCamelCase : int =None lowerCamelCase : float =0.0 lowerCamelCase : bool =None lowerCamelCase : jnp.dtype =jnp.floataa def __a ( self ) -> int: a : Dict = self.in_channels if self.out_channels is None else self.out_channels a : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) a : List[Any] = nn.Conv( lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) a : List[Any] = nn.Dense(lowerCAmelCase__ , dtype=self.dtype ) a : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) a : Optional[int] = nn.Dropout(self.dropout_prob ) a : Dict = nn.Conv( lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) a : Union[str, Any] = self.in_channels != out_channels if self.use_nin_shortcut is None 
else self.use_nin_shortcut a : List[str] = None if use_nin_shortcut: a : Optional[Any] = nn.Conv( lowerCAmelCase__ , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , ) def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=True ) -> str: a : int = hidden_states a : Tuple = self.norma(lowerCAmelCase__ ) a : Any = nn.swish(lowerCAmelCase__ ) a : int = self.conva(lowerCAmelCase__ ) a : int = self.time_emb_proj(nn.swish(lowerCAmelCase__ ) ) a : Tuple = jnp.expand_dims(jnp.expand_dims(lowerCAmelCase__ , 1 ) , 1 ) a : Dict = hidden_states + temb a : str = self.norma(lowerCAmelCase__ ) a : List[Any] = nn.swish(lowerCAmelCase__ ) a : List[str] = self.dropout(lowerCAmelCase__ , lowerCAmelCase__ ) a : List[str] = self.conva(lowerCAmelCase__ ) if self.conv_shortcut is not None: a : Tuple = self.conv_shortcut(lowerCAmelCase__ ) return hidden_states + residual
31
1
"""simple docstring""" from pathlib import Path import fire def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : str , _lowercase : int ) ->List[str]: '''simple docstring''' a : List[Any] = Path(_lowercase ) a : str = Path(_lowercase ) dest_dir.mkdir(exist_ok=_lowercase ) for path in src_dir.iterdir(): a : int = [x.rstrip() for x in list(path.open().readlines() )][:n] a : Optional[Any] = dest_dir.joinpath(path.name ) print(_lowercase ) dest_path.open("w" ).write("\n".join(_lowercase ) ) if __name__ == "__main__": fire.Fire(minify)
31
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def _SCREAMING_SNAKE_CASE ( _lowercase : str=None ) ->Optional[Any]: '''simple docstring''' if subparsers is not None: a : Dict = subparsers.add_parser("test" ) else: a : Tuple = argparse.ArgumentParser("Accelerate test command" ) parser.add_argument( "--config_file" , default=_lowercase , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , ) if subparsers is not None: parser.set_defaults(func=_lowercase ) return parser def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->str: '''simple docstring''' a : List[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] ) if args.config_file is None: a : int = script_name else: a : int = F"""--config_file={args.config_file} {script_name}""" a : Optional[int] = ["accelerate-launch"] + test_args.split() a : Optional[int] = execute_subprocess_async(_lowercase , env=os.environ.copy() ) if result.returncode == 0: print("Test is a success! 
You are ready for your distributed training!" ) def _SCREAMING_SNAKE_CASE ( ) ->Tuple: '''simple docstring''' a : Any = test_command_parser() a : Union[str, Any] = parser.parse_args() test_command(_lowercase ) if __name__ == "__main__": main()
31
1
"""simple docstring""" from __future__ import annotations class __UpperCamelCase : def __init__( self , lowerCAmelCase__ ) -> None: a : Any = data a : Node | None = None a : Node | None = None def _SCREAMING_SNAKE_CASE ( _lowercase : Node | None ) ->None: # In Order traversal of the tree '''simple docstring''' if tree: display(tree.left ) print(tree.data ) display(tree.right ) def _SCREAMING_SNAKE_CASE ( _lowercase : Node | None ) ->int: '''simple docstring''' return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def _SCREAMING_SNAKE_CASE ( _lowercase : Node ) ->bool: '''simple docstring''' if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def _SCREAMING_SNAKE_CASE ( ) ->None: # Main function for testing. '''simple docstring''' a : Tuple = Node(1 ) a : List[str] = Node(2 ) a : int = Node(3 ) a : Tuple = Node(4 ) a : int = Node(5 ) a : List[str] = Node(6 ) a : List[Any] = Node(7 ) a : Dict = Node(8 ) a : Optional[int] = Node(9 ) print(is_full_binary_tree(_lowercase ) ) print(depth_of_tree(_lowercase ) ) print("Tree is: " ) display(_lowercase ) if __name__ == "__main__": main()
31
"""simple docstring""" a : str = 8.314_4598 def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float ) ->float: '''simple docstring''' if temperature < 0: raise Exception("Temperature cannot be less than 0 K" ) if molar_mass <= 0: raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" ) else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example a : Any = 300 a : Dict = 28 a : Dict = rms_speed_of_molecule(temperature, molar_mass) print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
31
1
"""simple docstring""" a : str = 8.31_4462 # Unit - J mol-1 K-1 def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float , _lowercase : float ) ->float: '''simple docstring''' if moles < 0 or kelvin < 0 or volume < 0: raise ValueError("Invalid inputs. Enter positive value." ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float , _lowercase : float ) ->float: '''simple docstring''' if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError("Invalid inputs. Enter positive value." ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
31
"""simple docstring""" import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class __UpperCamelCase ( unittest.TestCase ): def __a ( self , lowerCAmelCase__ ) -> Optional[int]: a : str = 3 a : str = 250 a : List[Any] = ids_tensor((batch_size, length) , lowerCAmelCase__ ) a : Optional[Any] = torch.ones((batch_size, length) , device=lowerCAmelCase__ , dtype=torch.float ) / length return input_ids, scores def __a ( self ) -> List[Any]: a, a : str = self._get_tensors(5 ) a : Any = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : str = self._get_tensors(9 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : Union[str, Any] = self._get_tensors(10 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __a ( self ) -> List[Any]: a : Optional[Any] = MaxLengthCriteria(max_length=10 ) a, a : int = self._get_tensors(5 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : int = self._get_tensors(9 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : Union[str, Any] = self._get_tensors(10 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __a ( self ) -> List[str]: a : Tuple = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) a, a : str = self._get_tensors(5 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : int = self._get_tensors(9 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a, a : int = self._get_tensors(10 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a : 
List[Any] = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def __a ( self ) -> str: a, a : Tuple = self._get_tensors(5 ) a : str = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) a : Optional[int] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __a ( self ) -> str: validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(lowerCAmelCase__ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) a : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(lowerCAmelCase__ ) , 1 )
31
1
"""simple docstring""" from __future__ import annotations def _SCREAMING_SNAKE_CASE ( _lowercase : list ) ->list: '''simple docstring''' if len(_lowercase ) == 0: return [] a, a : Optional[Any] = min(_lowercase ), max(_lowercase ) a : Union[str, Any] = int(max_value - min_value ) + 1 a : list[list] = [[] for _ in range(_lowercase )] for i in my_list: buckets[int(i - min_value )].append(_lowercase ) return [v for bucket in buckets for v in sorted(_lowercase )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
31
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : int = 200 ) ->int: '''simple docstring''' a : Dict = [1, 2, 5, 10, 20, 50, 100, 200] a : Optional[Any] = [0] * (pence + 1) a : List[Any] = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(_lowercase , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(200) == 73682
31
1
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : int = 10 , _lowercase : int = 1000 , _lowercase : bool = True ) ->int: '''simple docstring''' assert ( isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) ), "Invalid type of value(s) specified to function!" if min_val > max_val: raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" ) return min_val if option else max_val def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int ) ->int: '''simple docstring''' return int((number_a + number_a) / 2 ) def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int , _lowercase : int ) ->None: '''simple docstring''' assert ( isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) ), 'argument values must be type of "int"' if lower > higher: raise ValueError("argument value for lower and higher must be(lower > higher)" ) if not lower < to_guess < higher: raise ValueError( "guess value must be within the range of lower and higher value" ) def answer(_lowercase : int ) -> str: if number > to_guess: return "high" elif number < to_guess: return "low" else: return "same" print("started..." ) a : Optional[Any] = lower a : List[Any] = higher a : Tuple = [] while True: a : List[Any] = get_avg(_lowercase , _lowercase ) last_numbers.append(_lowercase ) if answer(_lowercase ) == "low": a : Optional[int] = number elif answer(_lowercase ) == "high": a : Tuple = number else: break print(F"""guess the number : {last_numbers[-1]}""" ) print(F"""details : {last_numbers!s}""" ) def _SCREAMING_SNAKE_CASE ( ) ->None: '''simple docstring''' a : Tuple = int(input("Enter lower value : " ).strip() ) a : Dict = int(input("Enter high value : " ).strip() ) a : Optional[int] = int(input("Enter value to guess : " ).strip() ) guess_the_number(_lowercase , _lowercase , _lowercase ) if __name__ == "__main__": main()
31
"""simple docstring""" from ..utils import DummyObject, requires_backends class __UpperCamelCase ( metaclass=a__ ): lowerCamelCase : Optional[Any] =["""transformers""", """torch""", """note_seq"""] def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]: requires_backends(self , ["transformers", "torch", "note_seq"] ) @classmethod def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any: requires_backends(cls , ["transformers", "torch", "note_seq"] ) @classmethod def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int: requires_backends(cls , ["transformers", "torch", "note_seq"] )
31
1
"""simple docstring""" import os def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->List[str]: '''simple docstring''' a : List[str] = len(grid[0] ) a : Union[str, Any] = len(_lowercase ) a : str = 0 a : List[str] = 0 a : List[Any] = 0 # Check vertically, horizontally, diagonally at the same time (only works # for nxn grid) for i in range(_lowercase ): for j in range(n_rows - 3 ): a : Tuple = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i] a : Dict = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] # Left-to-right diagonal (\) product if i < n_columns - 3: a : int = ( grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3] ) # Right-to-left diagonal(/) product if i > 2: a : int = ( grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3] ) a : List[Any] = max( _lowercase , _lowercase , _lowercase , _lowercase ) if max_product > largest: a : Any = max_product return largest def _SCREAMING_SNAKE_CASE ( ) ->Union[str, Any]: '''simple docstring''' a : List[str] = [] with open(os.path.dirname(_lowercase ) + "/grid.txt" ) as file: for line in file: grid.append(line.strip("\n" ).split(" " ) ) a : Dict = [[int(_lowercase ) for i in grid[j]] for j in range(len(_lowercase ) )] return largest_product(_lowercase ) if __name__ == "__main__": print(solution())
31
"""simple docstring""" import qiskit def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int ) ->qiskit.result.counts.Counts: '''simple docstring''' a : Union[str, Any] = qiskit.Aer.get_backend("aer_simulator" ) # Create a Quantum Circuit acting on the q register a : Optional[Any] = qiskit.QuantumCircuit(_lowercase , _lowercase ) # Map the quantum measurement to the classical bits circuit.measure([0] , [0] ) # Execute the circuit on the simulator a : Optional[int] = qiskit.execute(_lowercase , _lowercase , shots=1000 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(_lowercase ) if __name__ == "__main__": print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
31
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices a : Union[str, Any] = logging.get_logger(__name__) a : Union[str, Any] = { '''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''', } class __UpperCamelCase ( a__ , a__ ): lowerCamelCase : Tuple ="""convnextv2""" def __init__( self , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0.0 , lowerCAmelCase__=224 , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> List[str]: super().__init__(**lowerCAmelCase__ ) a : List[Any] = num_channels a : str = patch_size a : Union[str, Any] = num_stages a : List[Any] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes a : str = [3, 3, 9, 3] if depths is None else depths a : int = hidden_act a : str = initializer_range a : List[str] = layer_norm_eps a : Optional[int] = drop_path_rate a : Dict = image_size a : Any = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] a, a : Optional[Any] = get_aligned_output_features_output_indices( out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
31
"""simple docstring""" from random import randint from tempfile import TemporaryFile import numpy as np def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] , _lowercase : Optional[Any] , _lowercase : Union[str, Any] ) ->Dict: '''simple docstring''' a : List[str] = 0 if start < end: a : Tuple = randint(_lowercase , _lowercase ) a : List[str] = a[end] a : str = a[pivot] a : Optional[int] = temp a, a : Dict = _in_place_partition(_lowercase , _lowercase , _lowercase ) count += _in_place_quick_sort(_lowercase , _lowercase , p - 1 ) count += _in_place_quick_sort(_lowercase , p + 1 , _lowercase ) return count def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : str , _lowercase : List[Any] ) ->str: '''simple docstring''' a : Union[str, Any] = 0 a : List[Any] = randint(_lowercase , _lowercase ) a : int = a[end] a : List[str] = a[pivot] a : Tuple = temp a : Union[str, Any] = start - 1 for index in range(_lowercase , _lowercase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value a : List[str] = new_pivot_index + 1 a : Optional[int] = a[new_pivot_index] a : Union[str, Any] = a[index] a : List[Any] = temp a : Tuple = a[new_pivot_index + 1] a : str = a[end] a : Dict = temp return new_pivot_index + 1, count a : int = TemporaryFile() a : Tuple = 100 # 1000 elements are to be sorted a , a : int = 0, 1 # mean and standard deviation a : List[Any] = np.random.normal(mu, sigma, p) np.save(outfile, X) print('''The array is''') print(X) outfile.seek(0) # using the same array a : int = np.load(outfile) a : Tuple = len(M) - 1 a : Union[str, Any] = _in_place_quick_sort(M, 0, r) print( '''No of Comparisons for 100 elements selected from a standard normal distribution''' '''is :''' ) print(z)
31
1