code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig _a = logging.get_logger(__name__) # General docstring _a = '''MobileNetV1Config''' # Base docstring _a = '''google/mobilenet_v1_1.0_224''' _a = [1, 1_024, 7, 7] # Image classification docstring _a = '''google/mobilenet_v1_1.0_224''' _a = '''tabby, tabby cat''' _a = [ '''google/mobilenet_v1_1.0_224''', '''google/mobilenet_v1_0.75_192''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=None ) -> Any: '''simple docstring''' lowerCamelCase__ = {} if isinstance(__UpperCamelCase ,__UpperCamelCase ): lowerCamelCase__ = model.mobilenet_va else: lowerCamelCase__ = model lowerCamelCase__ = """MobilenetV1/Conv2d_0/""" lowerCamelCase__ = backbone.conv_stem.convolution.weight lowerCamelCase__ = backbone.conv_stem.normalization.bias lowerCamelCase__ = backbone.conv_stem.normalization.weight lowerCamelCase__ = backbone.conv_stem.normalization.running_mean lowerCamelCase__ = backbone.conv_stem.normalization.running_var for i in range(13 ): lowerCamelCase__ = i + 1 lowerCamelCase__ = i * 2 lowerCamelCase__ = backbone.layer[pt_index] lowerCamelCase__ = F'MobilenetV1/Conv2d_{tf_index}_depthwise/' lowerCamelCase__ = pointer.convolution.weight lowerCamelCase__ = pointer.normalization.bias lowerCamelCase__ = pointer.normalization.weight lowerCamelCase__ = pointer.normalization.running_mean lowerCamelCase__ = pointer.normalization.running_var lowerCamelCase__ = backbone.layer[pt_index + 1] 
lowerCamelCase__ = F'MobilenetV1/Conv2d_{tf_index}_pointwise/' lowerCamelCase__ = pointer.convolution.weight lowerCamelCase__ = pointer.normalization.bias lowerCamelCase__ = pointer.normalization.weight lowerCamelCase__ = pointer.normalization.running_mean lowerCamelCase__ = pointer.normalization.running_var if isinstance(__UpperCamelCase ,__UpperCamelCase ): lowerCamelCase__ = """MobilenetV1/Logits/Conv2d_1c_1x1/""" lowerCamelCase__ = model.classifier.weight lowerCamelCase__ = model.classifier.bias return tf_to_pt_map def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> str: '''simple docstring''' try: import numpy as np import tensorflow as tf except ImportError: logger.error( '''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see ''' '''https://www.tensorflow.org/install/ for installation instructions.''' ) raise # Load weights from TF model lowerCamelCase__ = tf.train.list_variables(__UpperCamelCase ) lowerCamelCase__ = {} for name, shape in init_vars: logger.info(F'Loading TF weight {name} with shape {shape}' ) lowerCamelCase__ = tf.train.load_variable(__UpperCamelCase ,__UpperCamelCase ) lowerCamelCase__ = array # Build TF to PyTorch weights loading map lowerCamelCase__ = _build_tf_to_pytorch_map(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) for name, pointer in tf_to_pt_map.items(): logger.info(F'Importing {name}' ) if name not in tf_weights: logger.info(F'{name} not in tf pre-trained weights, skipping' ) continue lowerCamelCase__ = tf_weights[name] if "depthwise_weights" in name: logger.info('''Transposing depthwise''' ) lowerCamelCase__ = np.transpose(__UpperCamelCase ,(2, 3, 0, 1) ) elif "weights" in name: logger.info('''Transposing''' ) if len(pointer.shape ) == 2: # copying into linear layer lowerCamelCase__ = array.squeeze().transpose() else: lowerCamelCase__ = np.transpose(__UpperCamelCase ,(3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F'Pointer shape {pointer.shape} and 
array shape {array.shape} mismatched' ) logger.info(F'Initialize PyTorch weight {name} {array.shape}' ) lowerCamelCase__ = torch.from_numpy(__UpperCamelCase ) tf_weights.pop(__UpperCamelCase ,__UpperCamelCase ) tf_weights.pop(name + '''/RMSProp''' ,__UpperCamelCase ) tf_weights.pop(name + '''/RMSProp_1''' ,__UpperCamelCase ) tf_weights.pop(name + '''/ExponentialMovingAverage''' ,__UpperCamelCase ) logger.info(F'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' ) return model def lowerCAmelCase__(__snake_case ,__snake_case ) -> List[Any]: '''simple docstring''' lowerCamelCase__ = features.shape[-2:] lowerCamelCase__ = conv_layer.stride lowerCamelCase__ = conv_layer.kernel_size if in_height % stride_height == 0: lowerCamelCase__ = max(kernel_height - stride_height ,0 ) else: lowerCamelCase__ = max(kernel_height - (in_height % stride_height) ,0 ) if in_width % stride_width == 0: lowerCamelCase__ = max(kernel_width - stride_width ,0 ) else: lowerCamelCase__ = max(kernel_width - (in_width % stride_width) ,0 ) lowerCamelCase__ = pad_along_width // 2 lowerCamelCase__ = pad_along_width - pad_left lowerCamelCase__ = pad_along_height // 2 lowerCamelCase__ = pad_along_height - pad_top lowerCamelCase__ = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(__UpperCamelCase ,__UpperCamelCase ,'''constant''' ,0.0 ) class __A ( nn.Module ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1 , __lowerCAmelCase = 1 , __lowerCAmelCase = False , __lowerCAmelCase = True , __lowerCAmelCase = True , ): '''simple docstring''' super().__init__() lowerCamelCase__ = config if in_channels % groups != 0: raise ValueError(F'Input channels ({in_channels}) are not divisible by {groups} groups.' ) if out_channels % groups != 0: raise ValueError(F'Output channels ({out_channels}) are not divisible by {groups} groups.' 
) lowerCamelCase__ = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) lowerCamelCase__ = nn.Convad( in_channels=_lowercase , out_channels=_lowercase , kernel_size=_lowercase , stride=_lowercase , padding=_lowercase , groups=_lowercase , bias=_lowercase , padding_mode='''zeros''' , ) if use_normalization: lowerCamelCase__ = nn.BatchNormad( num_features=_lowercase , eps=config.layer_norm_eps , momentum=0.9997 , affine=_lowercase , track_running_stats=_lowercase , ) else: lowerCamelCase__ = None if use_activation: if isinstance(_lowercase , _lowercase ): lowerCamelCase__ = ACTaFN[use_activation] elif isinstance(config.hidden_act , _lowercase ): lowerCamelCase__ = ACTaFN[config.hidden_act] else: lowerCamelCase__ = config.hidden_act else: lowerCamelCase__ = None def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' if self.config.tf_padding: lowerCamelCase__ = apply_tf_padding(_lowercase , self.convolution ) lowerCamelCase__ = self.convolution(_lowercase ) if self.normalization is not None: lowerCamelCase__ = self.normalization(_lowercase ) if self.activation is not None: lowerCamelCase__ = self.activation(_lowercase ) return features class __A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' lowerCAmelCase_ = MobileNetVaConfig lowerCAmelCase_ = load_tf_weights_in_mobilenet_va lowerCAmelCase_ = """mobilenet_v1""" lowerCAmelCase_ = """pixel_values""" lowerCAmelCase_ = False def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' if isinstance(_lowercase , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(_lowercase , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) _a = R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' _a = R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( """The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , SCREAMING_SNAKE_CASE__ , ) class __A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase = True ): '''simple docstring''' super().__init__(_lowercase ) lowerCamelCase__ = config lowerCamelCase__ = 3_2 lowerCamelCase__ = max(int(depth * config.depth_multiplier ) , config.min_depth ) lowerCamelCase__ = MobileNetVaConvLayer( _lowercase , in_channels=config.num_channels , out_channels=_lowercase , kernel_size=3 , stride=2 , ) lowerCamelCase__ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] lowerCamelCase__ = nn.ModuleList() for i in range(1_3 ): lowerCamelCase__ = out_channels if strides[i] == 2 or i == 0: depth *= 2 lowerCamelCase__ = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( _lowercase , in_channels=_lowercase , out_channels=_lowercase , kernel_size=3 , stride=strides[i] , groups=_lowercase , ) ) 
self.layer.append( MobileNetVaConvLayer( _lowercase , in_channels=_lowercase , out_channels=_lowercase , kernel_size=1 , ) ) lowerCamelCase__ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' raise NotImplementedError @add_start_docstrings_to_model_forward(_lowercase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowercase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __lowerCamelCase ( self , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , ): '''simple docstring''' lowerCamelCase__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCamelCase__ = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('''You have to specify pixel_values''' ) lowerCamelCase__ = self.conv_stem(_lowercase ) lowerCamelCase__ = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): lowerCamelCase__ = layer_module(_lowercase ) if output_hidden_states: lowerCamelCase__ = all_hidden_states + (hidden_states,) lowerCamelCase__ = hidden_states if self.pooler is not None: lowerCamelCase__ = torch.flatten(self.pooler(_lowercase ) , start_dim=1 ) else: lowerCamelCase__ = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_lowercase , pooler_output=_lowercase , hidden_states=_lowercase , ) @add_start_docstrings( """ MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
""" , SCREAMING_SNAKE_CASE__ , ) class __A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase ): '''simple docstring''' super().__init__(_lowercase ) lowerCamelCase__ = config.num_labels lowerCamelCase__ = MobileNetVaModel(_lowercase ) lowerCamelCase__ = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head lowerCamelCase__ = nn.Dropout(config.classifier_dropout_prob , inplace=_lowercase ) lowerCamelCase__ = nn.Linear(_lowercase , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_lowercase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __lowerCamelCase ( self , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , ): '''simple docstring''' lowerCamelCase__ = return_dict if return_dict is not None else self.config.use_return_dict lowerCamelCase__ = self.mobilenet_va(_lowercase , output_hidden_states=_lowercase , return_dict=_lowercase ) lowerCamelCase__ = outputs.pooler_output if return_dict else outputs[1] lowerCamelCase__ = self.classifier(self.dropout(_lowercase ) ) lowerCamelCase__ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowerCamelCase__ = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowerCamelCase__ = """single_label_classification""" else: lowerCamelCase__ = """multi_label_classification""" if self.config.problem_type == "regression": lowerCamelCase__ = MSELoss() if self.num_labels == 1: lowerCamelCase__ = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowerCamelCase__ = loss_fct(_lowercase , _lowercase ) elif self.config.problem_type == "single_label_classification": lowerCamelCase__ = 
CrossEntropyLoss() lowerCamelCase__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowerCamelCase__ = BCEWithLogitsLoss() lowerCamelCase__ = loss_fct(_lowercase , _lowercase ) if not return_dict: lowerCamelCase__ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=_lowercase , logits=_lowercase , hidden_states=outputs.hidden_states , )
721
from queue import PriorityQueue from typing import Any import numpy as np def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) -> float | int: '''simple docstring''' for nxt, d in graph[v]: if nxt in visited_forward: continue lowerCamelCase__ = cst_fwd.get(__snake_case ,np.inf ) lowerCamelCase__ = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) lowerCamelCase__ = new_cost_f lowerCamelCase__ = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: lowerCamelCase__ = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> int: '''simple docstring''' lowerCamelCase__ = -1 lowerCamelCase__ = set() lowerCamelCase__ = set() lowerCamelCase__ = {source: 0} lowerCamelCase__ = {destination: 0} lowerCamelCase__ = {source: None} lowerCamelCase__ = {destination: None} lowerCamelCase__ = PriorityQueue() lowerCamelCase__ = PriorityQueue() lowerCamelCase__ = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): lowerCamelCase__ , lowerCamelCase__ = queue_forward.get() visited_forward.add(__snake_case ) lowerCamelCase__ , lowerCamelCase__ = queue_backward.get() visited_backward.add(__snake_case ) lowerCamelCase__ = pass_and_relaxation( __snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) lowerCamelCase__ = pass_and_relaxation( __snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: lowerCamelCase__ = shortest_distance return shortest_path_distance _a = { "B": [["C", 1]], "C": [["D", 1]], "D": [["F", 
1]], "E": [["B", 1], ["G", 2]], "F": [], "G": [["F", 1]], } _a = { "B": [["E", 1]], "C": [["B", 1]], "D": [["C", 1]], "F": [["D", 1], ["G", 1]], "E": [[None, np.inf]], "G": [["E", 2]], } if __name__ == "__main__": import doctest doctest.testmod()
29
0
import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class __A ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) lowerCamelCase__ = AutoTokenizer.from_pretrained('''xlm-roberta-base''' ) lowerCamelCase__ = '''The dog is cute and lives in the garden house''' lowerCamelCase__ = jnp.array([tokenizer.encode(A_ )] ) lowerCamelCase__ = (1, 1_2, 7_6_8) # batch_size, sequence_length, embedding_vector_dim lowerCamelCase__ = jnp.array( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) lowerCamelCase__ = model(A_ )['''last_hidden_state'''] self.assertEqual(output.shape , A_ ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] , A_ , atol=1E-3 ) )
700
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __A ( lowerCAmelCase ): '''simple docstring''' lowerCAmelCase_ = """ClapFeatureExtractor""" lowerCAmelCase_ = ("""RobertaTokenizer""", """RobertaTokenizerFast""") def __init__( self , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' super().__init__(__lowerCAmelCase , __lowerCAmelCase ) def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = kwargs.pop('''sampling_rate''' , __lowerCAmelCase ) if text is None and audios is None: raise ValueError('''You have to specify either text or audios. Both cannot be none.''' ) if text is not None: lowerCamelCase__ = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) if audios is not None: lowerCamelCase__ = self.feature_extractor( __lowerCAmelCase , sampling_rate=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) if text is not None and audios is not None: lowerCamelCase__ = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase ) def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ): '''simple docstring''' return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ): '''simple docstring''' return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @property def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer.model_input_names lowerCamelCase__ = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
29
0
from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json", } class __A ( lowercase__ ): '''simple docstring''' lowerCAmelCase_ = 'mra' def __init__( self , __lowerCAmelCase=5_0_2_6_5 , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=1_2 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-5 , __lowerCAmelCase="absolute" , __lowerCAmelCase=4 , __lowerCAmelCase="full" , __lowerCAmelCase=0 , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , **__lowerCAmelCase , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) lowerCamelCase__ = vocab_size lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = initializer_range lowerCamelCase__ = type_vocab_size lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = position_embedding_type lowerCamelCase__ = block_per_row lowerCamelCase__ = approx_mode lowerCamelCase__ = initial_prior_first_n_blocks lowerCamelCase__ = initial_prior_diagonal_n_blocks
701
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class __A : '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , __lowerCAmelCase=0 , ): '''simple docstring''' lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = seq_length lowerCamelCase__ = is_training lowerCamelCase__ = use_input_mask lowerCamelCase__ = use_token_type_ids lowerCamelCase__ = use_labels lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = type_vocab_size lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = num_labels lowerCamelCase__ = 
num_choices lowerCamelCase__ = scope lowerCamelCase__ = projection_dim def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ = None if self.use_input_mask: # follow test_modeling_tf_ctrl.py lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ = None if self.use_token_type_ids: lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) lowerCamelCase__ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFDPRContextEncoder(config=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase 
) lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFDPRQuestionEncoder(config=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFDPRReader(config=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = config_and_inputs lowerCamelCase__ = {'''input_ids''': input_ids} return config, inputs_dict @require_tf class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) lowerCAmelCase_ = {"""feature-extraction""": TFDPRQuestionEncoder} if 
is_tf_available() else {} lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFDPRModelTester(self ) lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*__lowerCAmelCase ) @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRQuestionEncoder.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRReader.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class __A ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = 
TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' ) lowerCamelCase__ = tf.constant( [[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP] lowerCamelCase__ = model(__lowerCAmelCase )[0] # embedding shape = (1, 768) # compare the actual values for a slice. lowerCamelCase__ = tf.constant( [ [ 0.0323_6253, 0.1275_3335, 0.1681_8509, 0.0027_9786, 0.389_6933, 0.2426_4945, 0.217_8971, -0.0233_5227, -0.0848_1959, -0.1432_4117, ] ] ) self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
29
0
import darl # noqa import gym import tqdm from diffusers.experimental import ValueGuidedRLPipeline _a = { "n_samples": 64, "horizon": 32, "num_inference_steps": 20, "n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network "scale_grad_by_std": True, "scale": 0.1, "eta": 0.0, "t_grad_cutoff": 2, "device": "cpu", } if __name__ == "__main__": _a = "hopper-medium-v2" _a = gym.make(env_name) _a = ValueGuidedRLPipeline.from_pretrained( "bglick13/hopper-medium-v2-value-function-hor32", env=env, ) env.seed(0) _a = env.reset() _a = 0 _a = 0 _a = 1_000 _a = [obs.copy()] try: for t in tqdm.tqdm(range(T)): # call the policy _a = pipeline(obs, planning_horizon=32) # execute action in environment _a , _a , _a , _a = env.step(denorm_actions) _a = env.get_normalized_score(total_reward) # update return total_reward += reward total_score += score print( f"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:""" f""" {total_score}""" ) # save observations for rendering rollout.append(next_observation.copy()) _a = next_observation except KeyboardInterrupt: pass print(f"""Total reward: {total_reward}""")
702
import string from math import logaa def lowerCAmelCase__(__snake_case ,__snake_case ) -> int: '''simple docstring''' lowerCamelCase__ = document.translate( str.maketrans('''''' ,'''''' ,string.punctuation ) ).replace('''\n''' ,'''''' ) lowerCamelCase__ = document_without_punctuation.split(''' ''' ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def lowerCAmelCase__(__snake_case ,__snake_case ) -> tuple[int, int]: '''simple docstring''' lowerCamelCase__ = corpus.lower().translate( str.maketrans('''''' ,'''''' ,string.punctuation ) ) # strip all punctuation and replace it with '' lowerCamelCase__ = corpus_without_punctuation.split('''\n''' ) lowerCamelCase__ = term.lower() return (len([doc for doc in docs if term in doc] ), len(__snake_case )) def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=False ) -> float: '''simple docstring''' if smoothing: if n == 0: raise ValueError('''log10(0) is undefined.''' ) return round(1 + logaa(n / (1 + df) ) ,3 ) if df == 0: raise ZeroDivisionError('''df must be > 0''' ) elif n == 0: raise ValueError('''log10(0) is undefined.''' ) return round(logaa(n / df ) ,3 ) def lowerCAmelCase__(__snake_case ,__snake_case ) -> float: '''simple docstring''' return round(tf * idf ,3 )
29
0
'''simple docstring''' from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __A : '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=3_0 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=1_0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=None , ): '''simple docstring''' lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase__ = (image_size // patch_size) ** 2 lowerCamelCase__ = num_patches + 1 def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = 
floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def __lowerCamelCase ( self ): '''simple docstring''' return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFViTModel(config=__A ) lowerCamelCase__ = model(__A , training=__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. lowerCamelCase__ = self.image_size // 2 lowerCamelCase__ = pixel_values[:, :, :image_size, :image_size] lowerCamelCase__ = model(__A , interpolate_pos_encoding=__A , training=__A ) lowerCamelCase__ = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.type_sequence_label_size lowerCamelCase__ = TFViTForImageClassification(__A ) lowerCamelCase__ = model(__A , labels=__A , training=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. 
lowerCamelCase__ = self.image_size // 2 lowerCamelCase__ = pixel_values[:, :, :image_size, :image_size] lowerCamelCase__ = model(__A , interpolate_pos_encoding=__A , training=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase__ = 1 lowerCamelCase__ = TFViTForImageClassification(__A ) lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.prepare_config_and_inputs() lowerCamelCase__ = config_and_inputs lowerCamelCase__ = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class __A ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () lowerCAmelCase_ = ( {"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification} if is_tf_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFViTModelTester(self ) lowerCamelCase__ = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=3_7 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(__A ) 
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowerCamelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A , tf.keras.layers.Layer ) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(__A ) lowerCamelCase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["pixel_values"] self.assertListEqual(arg_names[:1] , __A ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(__A ) def lowerCAmelCase__() -> Union[str, Any]: '''simple docstring''' lowerCamelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __A ( unittest.TestCase ): '''simple docstring''' @cached_property def __lowerCamelCase ( self ): '''simple docstring''' return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=__A , return_tensors='''tf''' ) # forward pass lowerCamelCase__ = model(**__A ) # verify 
the logits lowerCamelCase__ = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __A ) lowerCamelCase__ = tf.constant([-0.2744, 0.8215, -0.0836] ) tf.debugging.assert_near(outputs.logits[0, :3] , __A , atol=1E-4 )
703
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _a = { "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"], "convert_funnel_original_tf_checkpoint_to_pytorch": [], "tokenization_funnel": ["FunnelTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = ["FunnelTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "FunnelBaseModel", "FunnelForMaskedLM", "FunnelForMultipleChoice", "FunnelForPreTraining", "FunnelForQuestionAnswering", "FunnelForSequenceClassification", "FunnelForTokenClassification", "FunnelModel", "FunnelPreTrainedModel", "load_tf_weights_in_funnel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFFunnelBaseModel", "TFFunnelForMaskedLM", "TFFunnelForMultipleChoice", "TFFunnelForPreTraining", "TFFunnelForQuestionAnswering", "TFFunnelForSequenceClassification", "TFFunnelForTokenClassification", "TFFunnelModel", "TFFunnelPreTrainedModel", ] if TYPE_CHECKING: from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig from .tokenization_funnel import FunnelTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_funnel_fast import FunnelTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, 
FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel, load_tf_weights_in_funnel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, TFFunnelPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
29
0
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class __A ( unittest.TestCase ): '''simple docstring''' def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = BlipImageProcessor() lowerCamelCase__ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' ) lowerCamelCase__ = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) lowerCamelCase__ = InstructBlipProcessor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) def __lowerCamelCase ( self , **__lowerCAmelCase ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).tokenizer def __lowerCamelCase ( self , **__lowerCAmelCase ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor def __lowerCamelCase ( self , **__lowerCAmelCase ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).qformer_tokenizer def __lowerCamelCase ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] lowerCamelCase__ = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) 
processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) lowerCamelCase__ = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) lowerCamelCase__ = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) self.assertIsInstance(processor.qformer_tokenizer , __lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = self.get_qformer_tokenizer() lowerCamelCase__ = InstructBlipProcessor( tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase , qformer_tokenizer=__lowerCAmelCase ) lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors='''np''' ) lowerCamelCase__ = processor(images=__lowerCAmelCase , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = self.get_qformer_tokenizer() lowerCamelCase__ = InstructBlipProcessor( tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase , qformer_tokenizer=__lowerCAmelCase ) lowerCamelCase__ = "lower newer" lowerCamelCase__ = processor(text=__lowerCAmelCase ) lowerCamelCase__ = tokenizer(__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase ) lowerCamelCase__ = 
qformer_tokenizer(__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = self.get_qformer_tokenizer() lowerCamelCase__ = InstructBlipProcessor( tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase , qformer_tokenizer=__lowerCAmelCase ) lowerCamelCase__ = "lower newer" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual( list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = self.get_qformer_tokenizer() lowerCamelCase__ = InstructBlipProcessor( tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase , qformer_tokenizer=__lowerCAmelCase ) lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase__ = processor.batch_decode(__lowerCAmelCase ) lowerCamelCase__ = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = self.get_qformer_tokenizer() lowerCamelCase__ = InstructBlipProcessor( tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase , qformer_tokenizer=__lowerCAmelCase ) lowerCamelCase__ = "lower 
newer" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual( list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
704
import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict _a = namedtuple( "_TestCommandArgs", [ "dataset", "name", "cache_dir", "data_dir", "all_configs", "save_infos", "ignore_verifications", "force_redownload", "clear_cache", ], defaults=[None, None, None, False, False, False, False, False], ) def lowerCAmelCase__(__snake_case ,__snake_case ) -> List[str]: '''simple docstring''' return (abs(source - target ) / target) < 0.0_1 @pytest.mark.integration def lowerCAmelCase__(__snake_case ) -> Tuple: '''simple docstring''' lowerCamelCase__ = _TestCommandArgs(dataset=__snake_case ,all_configs=__snake_case ,save_infos=__snake_case ) lowerCamelCase__ = TestCommand(*__snake_case ) test_command.run() lowerCamelCase__ = os.path.join(__snake_case ,'''README.md''' ) assert os.path.exists(__snake_case ) lowerCamelCase__ = DatasetInfosDict.from_directory(__snake_case ) lowerCamelCase__ = DatasetInfosDict( { '''default''': DatasetInfo( features=Features( { '''tokens''': Sequence(Value('''string''' ) ), '''ner_tags''': Sequence( ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ), '''langs''': Sequence(Value('''string''' ) ), '''spans''': Sequence(Value('''string''' ) ), } ) ,splits=[ { '''name''': '''train''', '''num_bytes''': 2351563, '''num_examples''': 10000, }, { '''name''': '''validation''', '''num_bytes''': 238418, '''num_examples''': 1000, }, ] ,download_size=3940680 ,dataset_size=2589981 ,) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: lowerCamelCase__ , lowerCamelCase__ = getattr(dataset_infos['''default'''] ,__snake_case ), getattr(expected_dataset_infos['''default'''] ,__snake_case ) if key == "num_bytes": assert is_apercent_close(__snake_case ,__snake_case ) elif key == "splits": 
assert list(__snake_case ) == list(__snake_case ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes ) else: result == expected
29
0
'''simple docstring''' def lowerCAmelCase__(__snake_case ) -> List[Any]: '''simple docstring''' if a < 0: raise ValueError('''Input value must be a positive integer''' ) elif isinstance(__snake_case ,__snake_case ): raise TypeError('''Input value must be a \'int\' type''' ) return bin(__snake_case ).count('''1''' ) if __name__ == "__main__": import doctest doctest.testmod()
705
from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class __A : '''simple docstring''' def __init__( self , __lowerCAmelCase , ): '''simple docstring''' lowerCamelCase__ = parent lowerCamelCase__ = 1_3 lowerCamelCase__ = 7 lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = 9_9 lowerCamelCase__ = 3_2 lowerCamelCase__ = 2 lowerCamelCase__ = 4 lowerCamelCase__ = 3_7 lowerCamelCase__ = '''gelu''' lowerCamelCase__ = 0.1 lowerCamelCase__ = 0.1 lowerCamelCase__ = 5_1_2 lowerCamelCase__ = 1_6 lowerCamelCase__ = 2 lowerCamelCase__ = 0.02 lowerCamelCase__ = 3 lowerCamelCase__ = 4 lowerCamelCase__ = None def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ = None if self.use_input_mask: lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self ): '''simple docstring''' ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = self.prepare_config_and_inputs() lowerCamelCase__ = True lowerCamelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase ) lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ = [input_ids, input_mask] lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ): '''simple docstring''' lowerCamelCase__ = True lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase ) lowerCamelCase__ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''encoder_hidden_states''': encoder_hidden_states, 
'''encoder_attention_mask''': encoder_attention_mask, } lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ = [input_ids, input_mask] lowerCamelCase__ = model(__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase ) # Also check the case where encoder outputs are not passed lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFEsmForMaskedLM(config=__lowerCAmelCase ) lowerCamelCase__ = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.num_labels lowerCamelCase__ = TFEsmForTokenClassification(config=__lowerCAmelCase ) lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = config_and_inputs lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) 
lowerCAmelCase_ = ( { """feature-extraction""": TFEsmModel, """fill-mask""": TFEsmForMaskedLM, """text-classification""": TFEsmForSequenceClassification, """token-classification""": TFEsmForTokenClassification, """zero-shot""": TFEsmForSequenceClassification, } if is_tf_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFEsmModelTester(self ) lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFEsmModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @unittest.skip('''Protein models do not support embedding resizing.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''Protein models do not support embedding resizing.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(__lowerCAmelCase ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer lowerCamelCase__ = model.get_bias() assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) for k, v in name.items(): assert isinstance(__lowerCAmelCase , tf.Variable ) else: lowerCamelCase__ = model.get_output_embeddings() assert x is None lowerCamelCase__ = model.get_bias() assert name is None @require_tf class __A ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) lowerCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase__ = model(__lowerCAmelCase )[0] lowerCamelCase__ = [1, 6, 3_3] self.assertEqual(list(output.numpy().shape ) , __lowerCAmelCase ) # compare the actual values for a slice. lowerCamelCase__ = tf.constant( [ [ [8.92_1518, -10.58_9814, -6.467_1307], [-6.396_7156, -13.91_1377, -1.121_1915], [-7.78_1247, -13.95_1557, -3.74_0592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) lowerCamelCase__ = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] ) lowerCamelCase__ = model(__lowerCAmelCase )[0] # compare the actual values for a slice. lowerCamelCase__ = tf.constant( [ [ [0.1444_3092, 0.5412_5327, 0.324_7739], [0.3034_0484, 0.0052_6676, 0.3107_7722], [0.3227_8043, -0.2498_7096, 0.341_4628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
29
0
from collections import deque


def tarjan(g: list) -> list:
    """Return the strongly connected components of the directed graph ``g``.

    ``g`` is an adjacency list (g[u] is the list of successors of u).
    Components are emitted in reverse topological order of the condensation.
    """
    n_vertices = len(g)
    stack = deque()
    on_stack = [False for _ in range(n_vertices)]
    index_of = [-1 for _ in range(n_vertices)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            # v is the root of a strongly connected component: pop it off the stack.
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n_vertices):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n: int, edges: list) -> list:
    """Build an adjacency list for ``n`` vertices from (u, v) edge pairs."""
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
706
from math import sqrt

# Number-theory helpers (primality, sieves, factorization, gcd/lcm, ...).
# NOTE: preconditions are enforced with `assert`, matching the original
# contract; asserts are stripped under `python -O`.


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime (trial division up to sqrt)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then set 'status'
        # to false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int) -> list:
    """Return all primes from 2 up to ``n`` via the sieve of Eratosthenes."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.

    # actual sieve of Eratosthenes: cross out (zero) every multiple
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int) -> list:
    """Return all primes from 2 up to ``n`` via repeated primality tests."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int) -> list:
    """Return the prime factorization of ``number`` as an ascending list."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returned by the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient //= factor  # integer division keeps the quotient exact
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    """Return True if ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True if ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"

    return number % 2 != 0


def goldbach(number: int) -> list:
    """Return two primes summing to ``number`` (Goldbach's conjecture).

    ``number`` must be an even int greater than 2.
    """
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number_1: int, number_2: int) -> int:
    """Return the greatest common divisor (Euclidean algorithm)."""
    assert (
        isinstance(number_1, int)
        and isinstance(number_2, int)
        and (number_1 >= 0)
        and (number_2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0
    while number_2 != 0:
        rest = number_1 % number_2
        number_1 = number_2
        number_2 = rest

    # precondition
    assert isinstance(number_1, int) and (
        number_1 >= 0
    ), "'number' must been from type int and positive"
    return number_1


def kg_v(number_1: int, number_2: int) -> int:
    """Return the least common multiple (kgV) of two positive integers."""
    assert (
        isinstance(number_1, int)
        and isinstance(number_2, int)
        and (number_1 >= 1)
        and (number_2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be returned.

    # for kgV (x,1)
    if number_1 > 1 and number_2 > 1:
        # builds the prime factorization of 'number_1' and 'number_2'
        prime_fac_1 = prime_factorization(number_1)
        prime_fac_2 = prime_factorization(number_2)
    elif number_1 == 1 or number_2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number_1, number_2)

    count1 = 0
    count2 = 0
    done = []  # captured numbers in both 'prime_fac_1' and 'prime_fac_2'

    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n: int) -> int:
    """Return the (n+1)-th prime number (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return all primes strictly between the primes ``p_number_1`` and ``p_number_2``."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.

    # if number is not prime then fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    """Return all positive divisors of ``n`` (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    """Return True if ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Return ``(numerator, denominator)`` reduced to lowest terms."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Return n! (factorial of ``n``, with factorial(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Return the n-th value of this module's Fibonacci sequence.

    The iteration starts at 1, 1, 2, 3, ... so fib(0) == fib(1) == 1,
    fib(2) == 2, fib(5) == 8.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
29
0
import math

import qiskit


def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    """Simulate a quantum full adder on two input bits plus a carry-in bit.

    Each input may be 0, 1, or 2; the value 2 puts the corresponding qubit
    into superposition with a Hadamard gate.  Returns the measurement counts
    of the (sum, carry-out) qubits over 1000 shots on the Aer simulator.

    Raises TypeError for string inputs and ValueError for negative,
    non-integral, or > 2 inputs.
    """
    # Reject strings before the numeric comparisons below.  (The previous
    # check was inverted and raised TypeError whenever the inputs WERE ints.)
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers: 4 qubits (two inputs, carry-in, ancilla), 2 classical bits
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")

    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)

    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
707
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start:end + 1]`` in place using slowsort.

    Slowsort is a deliberately pessimal divide-and-conquer algorithm
    ("multiply and surrender") — for teaching purposes only.

    >>> seq = [3, 2, 1]
    >>> slowsort(seq)
    >>> seq
    [1, 2, 3]
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    # Recursively slowsort both halves, ...
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    # ... place the larger of the two half-maxima at the end, ...
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    # ... then slowsort everything except that maximum.
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
29
0
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Project Euler 63: count n-digit positive integers that are nth powers.

    Checks bases 1..max_base-1 against powers 1..max_power-1 (larger bases or
    powers cannot produce an n-digit nth power for base-10 digits).

    >>> solution(10, 22)
    49
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    # An nth power b**n qualifies when it has exactly n decimal digits.
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
708
from __future__ import annotations


def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    """Return the simple interest accrued: principal * rate * days.

    Raises ValueError if principal or days_between_payments is not positive,
    or if daily_interest_rate is negative.
    """
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    """Return the compound interest earned (not the final balance):
    principal * ((1 + rate) ** periods - 1).

    Raises ValueError if principal or the period count is not positive,
    or if the rate is negative.
    """
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")

    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    """Return the interest earned with daily compounding over ``number_of_years``
    at the given annual percentage rate (APR split over 365 days).

    Raises ValueError if principal or number_of_years is not positive,
    or if the rate is negative.
    """
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")

    # Daily compounding: per-day rate over 365 periods per year.
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
29
0
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class __A ( unittest.TestCase , UpperCAmelCase__ ):
    '''Smoke tests for the `text-classification` tool, local and remote variants.

    NOTE(review): the second base class `UpperCAmelCase__` is not defined in
    this file — presumably it stood for `ToolTesterMixin` (imported above);
    confirm against the original test module.
    '''

    def __lowerCamelCase ( self ):
        '''Load the local and remote tool instances (setUp-style fixture).

        NOTE(review): both `load_tool` results are bound to a local name, yet
        the tests below read `self.tool` / `self.remote_tool`, and the
        `remote=__lowerCAmelCase` argument is undefined here — these look like
        they were `self.tool = load_tool(...)` and `remote=True`; verify.
        '''
        lowerCamelCase__ = load_tool('''text-classification''' )
        self.tool.setup()
        lowerCamelCase__ = load_tool('''text-classification''' , remote=__lowerCAmelCase )

    def __lowerCamelCase ( self ):
        '''Positional call on the local tool classifies the text as positive.'''
        lowerCamelCase__ = self.tool('''That\'s quite cool''' , ['''positive''', '''negative'''] )
        self.assertEqual(__lowerCAmelCase , '''positive''' )

    def __lowerCamelCase ( self ):
        '''Positional call on the remote tool classifies the text as positive.'''
        lowerCamelCase__ = self.remote_tool('''That\'s quite cool''' , ['''positive''', '''negative'''] )
        self.assertEqual(__lowerCAmelCase , '''positive''' )

    def __lowerCamelCase ( self ):
        '''Keyword-argument call on the local tool gives the same result.'''
        lowerCamelCase__ = self.tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''] )
        self.assertEqual(__lowerCAmelCase , '''positive''' )

    def __lowerCamelCase ( self ):
        '''Keyword-argument call on the remote tool gives the same result.'''
        lowerCamelCase__ = self.remote_tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''] )
        self.assertEqual(__lowerCAmelCase , '''positive''' )
709
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator: the wrapped call returns its wall-clock duration in seconds.

    The wrapped function's own return value is discarded; ``__name__`` is
    copied so benchmark reports show the original function name.
    """

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    """Build ``num_examples`` rows of random dummy data matching ``features``.

    ``seq_shapes`` maps column name -> shape for Sequence features.
    Returns a list of ``(index, example_dict)`` pairs.
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for k, v in features.items():
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # Unwrap nested Sequence features down to the leaf feature.
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write ``num_examples`` random examples to an Arrow file at
    ``dataset_path`` and return the resulting ``datasets.Dataset``.

    Raises ValueError if the writer reports a different example count.
    """
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
29
0
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class __A ( unittest.TestCase ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=3 , __lowerCAmelCase=3_0 , __lowerCAmelCase=4_0_0 , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=[0.5, 0.5, 0.5] , __lowerCAmelCase=[0.5, 0.5, 0.5] , __lowerCAmelCase=True , __lowerCAmelCase=1 / 2_5_5 , __lowerCAmelCase=True , ): '''simple docstring''' lowerCamelCase__ = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = num_channels lowerCamelCase__ = min_resolution lowerCamelCase__ = max_resolution lowerCamelCase__ = do_resize lowerCamelCase__ = size lowerCamelCase__ = do_normalize lowerCamelCase__ = image_mean lowerCamelCase__ = image_std lowerCamelCase__ = do_rescale lowerCamelCase__ = rescale_factor lowerCamelCase__ = do_pad def __lowerCamelCase ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=False ): '''simple docstring''' if not batched: lowerCamelCase__ = image_inputs[0] if isinstance(UpperCAmelCase_ , Image.Image ): lowerCamelCase__ , lowerCamelCase__ = image.size else: lowerCamelCase__ , lowerCamelCase__ = image.shape[1], image.shape[2] if w < 
h: lowerCamelCase__ = int(self.size['''shortest_edge'''] * h / w ) lowerCamelCase__ = self.size['''shortest_edge'''] elif w > h: lowerCamelCase__ = self.size['''shortest_edge'''] lowerCamelCase__ = int(self.size['''shortest_edge'''] * w / h ) else: lowerCamelCase__ = self.size['''shortest_edge'''] lowerCamelCase__ = self.size['''shortest_edge'''] else: lowerCamelCase__ = [] for image in image_inputs: lowerCamelCase__ , lowerCamelCase__ = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCamelCase__ = max(UpperCAmelCase_ , key=lambda __lowerCAmelCase : item[0] )[0] lowerCamelCase__ = max(UpperCAmelCase_ , key=lambda __lowerCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __A ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = ConditionalDetrImageProcessor if is_vision_available() else None def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ConditionalDetrImageProcessingTester(self ) @property def __lowerCamelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ , '''image_mean''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''image_std''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''size''' ) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} ) self.assertEqual(image_processor.do_pad , UpperCAmelCase_ ) lowerCamelCase__ = self.image_processing_class.from_dict( self.image_processor_dict 
, size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=UpperCAmelCase_ ) self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2, '''longest_edge''': 8_4} ) self.assertEqual(image_processor.do_pad , UpperCAmelCase_ ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , Image.Image ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowerCamelCase__ , lowerCamelCase__ = self.image_processor_tester.get_expected_values(UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase__ , lowerCamelCase__ = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ ) lowerCamelCase__ = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowerCamelCase__ , lowerCamelCase__ = self.image_processor_tester.get_expected_values(UpperCAmelCase_ ) self.assertEqual( 
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase__ = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values lowerCamelCase__ , lowerCamelCase__ = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowerCamelCase__ , lowerCamelCase__ = self.image_processor_tester.get_expected_values(UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase__ = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values lowerCamelCase__ , lowerCamelCase__ = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: lowerCamelCase__ = json.loads(f.read() ) lowerCamelCase__ = {'''image_id''': 3_9_7_6_9, '''annotations''': target} # encode 
them lowerCamelCase__ = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' ) lowerCamelCase__ = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , return_tensors='''pt''' ) # verify pixel values lowerCamelCase__ = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase_ ) lowerCamelCase__ = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) ) # verify area lowerCamelCase__ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase_ ) ) # verify boxes lowerCamelCase__ = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase_ ) lowerCamelCase__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase_ , atol=1E-3 ) ) # verify image_id lowerCamelCase__ = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase_ ) ) # verify is_crowd lowerCamelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase_ ) ) # verify class_labels lowerCamelCase__ = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase_ ) ) # verify orig_size lowerCamelCase__ = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase_ ) ) # verify size lowerCamelCase__ = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase_ ) ) @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = 
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: lowerCamelCase__ = json.loads(f.read() ) lowerCamelCase__ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target} lowerCamelCase__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them lowerCamelCase__ = ConditionalDetrImageProcessor(format='''coco_panoptic''' ) lowerCamelCase__ = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , masks_path=UpperCAmelCase_ , return_tensors='''pt''' ) # verify pixel values lowerCamelCase__ = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase_ ) lowerCamelCase__ = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) ) # verify area lowerCamelCase__ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase_ ) ) # verify boxes lowerCamelCase__ = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase_ ) lowerCamelCase__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase_ , atol=1E-3 ) ) # verify image_id lowerCamelCase__ = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase_ ) ) # verify is_crowd lowerCamelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase_ ) ) # verify class_labels lowerCamelCase__ = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , 
UpperCAmelCase_ ) ) # verify masks lowerCamelCase__ = 8_2_2_8_7_3 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase_ ) # verify orig_size lowerCamelCase__ = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase_ ) ) # verify size lowerCamelCase__ = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase_ ) )
710
def lowerCAmelCase__(grid) -> int:
    """Return the minimum cost of a path from the top-left to the bottom-right of ``grid``.

    Movement is restricted to right/down steps. The grid is updated in place so
    that each cell ends up holding the cheapest cost to reach it.

    :param grid: non-empty rectangular matrix of non-negative step costs
    :return: minimum total cost of a top-left -> bottom-right path
    :raises TypeError: if ``grid`` is empty or its first row is empty

    >>> lowerCAmelCase__([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    >>> lowerCAmelCase__([[1, 0, 5, 6, 7]])
    19
    """
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''')

    # The first row can only be reached by moving right: running prefix sums.
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    # Every other row: each cell is entered from the left or from above,
    # whichever accumulated cost is smaller.
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row, row_above) -> list:
    """Accumulate minimal path costs into ``current_row`` given the completed row above.

    >>> fill_row([2, 2, 2], [1, 2, 3])
    [3, 4, 5]
    """
    # The leftmost cell can only be entered from above.
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
29
0
"""
Simple genetic algorithm: evolve a random population of strings until one of
them matches a target string exactly.
"""

from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score ``item`` by the number of positions at which it matches ``main_target``."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Cut both parents at a random index and swap their tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of ``child``."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed mutated children from ``parent_1``; fitter parents get more children."""
    pop = []
    # Generate more children proportionally to the fitness score (capped at 10).
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new strings to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until ``target`` is produced.

    :param target: string the population must evolve into
    :param genes: pool of characters the algorithm may use
    :param debug: print progress every 10 generations
    :return: (generation, total_population, best_string)
    :raises ValueError: if N_POPULATION < N_SELECTED, or ``target`` uses
        characters missing from ``genes``
    """
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Evaluate every individual of the current generation.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
    )
711
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures


logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to
    specify them on the command line.
    """

    task_name: str = field(
        metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())}
    )
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        # Task names are looked up case-insensitively in `glue_processors`.
        self.task_name = self.task_name.lower()


class Split(Enum):
    # Dataset splits; `GlueDataset` accepts either the enum member or its name as a string.
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    """PyTorch dataset of tokenized GLUE examples, with on-disk feature caching."""

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        """Return the (possibly reordered) label list for the task."""
        return self.label_list
29
0
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint into a PyTorch state dict.

    :param tf_checkpoint_path: path to the TF checkpoint to load
    :param rembert_config_file: JSON config describing the model architecture
    :param pytorch_dump_path: where the converted ``state_dict`` is saved
    """
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
712
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets _a = datasets.logging.get_logger(__name__) _a = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n" _a = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. 
All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n" _a = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. 
Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . 
*)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n" def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=False ,__snake_case=False ,__snake_case=True ,__snake_case=False ,__snake_case="dummy_doc" ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase__ = {doc: key_lines} lowerCamelCase__ = {doc: sys_lines} lowerCamelCase__ = {} lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ , lowerCamelCase__ = reader.get_doc_mentions(__snake_case ,key_doc_lines[doc] ,__snake_case ) key_singletons_num += singletons_num if NP_only or min_span: lowerCamelCase__ = reader.set_annotated_parse_trees(__snake_case ,key_doc_lines[doc] ,__snake_case ,__snake_case ) lowerCamelCase__ , lowerCamelCase__ = reader.get_doc_mentions(__snake_case ,sys_doc_lines[doc] ,__snake_case ) sys_singletons_num += singletons_num if NP_only or min_span: lowerCamelCase__ = reader.set_annotated_parse_trees(__snake_case ,key_doc_lines[doc] ,__snake_case ,__snake_case ) if remove_nested: lowerCamelCase__ , lowerCamelCase__ = reader.remove_nested_coref_mentions(__snake_case ,__snake_case ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters lowerCamelCase__ , lowerCamelCase__ = reader.remove_nested_coref_mentions(__snake_case ,__snake_case ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters lowerCamelCase__ = reader.get_mention_assignments(__snake_case ,__snake_case ) lowerCamelCase__ = reader.get_mention_assignments(__snake_case ,__snake_case ) lowerCamelCase__ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( '''Number of removed nested coreferring mentions in the key ''' 
F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' ) logger.info( '''Number of resulting singleton clusters in the key ''' F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' ) if not keep_singletons: logger.info( F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ' '''files, respectively''' ) return doc_coref_infos def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> str: '''simple docstring''' lowerCamelCase__ = get_coref_infos(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) lowerCamelCase__ = {} lowerCamelCase__ = 0 lowerCamelCase__ = 0 for name, metric in metrics: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = evaluator.evaluate_documents(__snake_case ,__snake_case ,beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} ) logger.info( name.ljust(10 ) ,F'Recall: {recall * 100:.2f}' ,F' Precision: {precision * 100:.2f}' ,F' F1: {fa * 100:.2f}' ,) if conll_subparts_num == 3: lowerCamelCase__ = (conll / 3) * 100 logger.info(F'CoNLL score: {conll:.2f}' ) output_scores.update({'''conll_score''': conll} ) return output_scores def lowerCAmelCase__(__snake_case ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase__ = False for line in key_lines: if not line.startswith('''#''' ): if len(line.split() ) > 6: lowerCamelCase__ = line.split()[5] if not parse_col == "-": lowerCamelCase__ = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): '''simple docstring''' def __lowerCamelCase ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , 
inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ), '''references''': datasets.Sequence(datasets.Value('''string''' ) ), } ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[ '''https://github.com/ns-moosavi/coval''', '''https://www.aclweb.org/anthology/P16-1060''', '''http://www.conll.cemantix.org/2012/data.html''', ] , ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False ): '''simple docstring''' lowerCamelCase__ = [ ('''mentions''', evaluator.mentions), ('''muc''', evaluator.muc), ('''bcub''', evaluator.b_cubed), ('''ceafe''', evaluator.ceafe), ('''lea''', evaluator.lea), ] if min_span: lowerCamelCase__ = util.check_gold_parse_annotation(__lowerCAmelCase ) if not has_gold_parse: raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" lowerCamelCase__ = evaluate( key_lines=__lowerCAmelCase , sys_lines=__lowerCAmelCase , metrics=__lowerCAmelCase , NP_only=__lowerCAmelCase , remove_nested=__lowerCAmelCase , keep_singletons=__lowerCAmelCase , min_span=__lowerCAmelCase , ) return score
29
0
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Combines CLIP image embeddings and text (prompt) embeddings into the time
    embedding and encoder hidden states consumed by the unCLIP decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 7_6_8,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        # Learned unconditional image embedding, used in place of the real image
        # embedding for the classifier-free-guidance branch.
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        """Return ``(text_encoder_hidden_states, additive_clip_time_embeddings)``."""
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
713
# This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests _a = open # noqa: we just need to have a builtin inside this module to test it properly
29
0
class Things:
    """A named item with a value and a weight, used by the greedy knapsack picker."""

    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        # Value density — a useful greedy sort key.
        return self.value / self.weight


def build_menu(name, value, weight):
    """Zip parallel name/value/weight lists into a list of ``Things``.

    >>> build_menu(["a"], [10], [2])
    [Things(a, 10, 2)]
    """
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    """Greedily take items, best-first by ``key_func``, while total weight fits ``max_cost``.

    :param item: iterable of ``Things``
    :param max_cost: maximum total weight allowed
    :param key_func: ranking function (e.g. ``Things.get_value``); items are
        considered in descending order of this key
    :return: ``(chosen_items, total_value)``
    """
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    # Placeholder kept from the original module; doctest.testmod() below
    # exercises the docstring examples instead.
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
714
import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging


logger = logging.get_logger(__name__)
_a = logger  # backward-compatible alias for the previous module-level name


class ParallelBackendConfig:
    """Holds the name of the joblib backend selected via `parallel_backend`,
    or None when the plain multiprocessing pool should be used."""

    backend_name = None


# Backward-compatible alias for the previous (obfuscated) class name.
__A = ParallelBackendConfig


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Dispatch a map over `iterable` either to a multiprocessing Pool (default)
    or to joblib when a backend was registered with `parallel_backend`."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Split `iterable` into contiguous slices and map `single_map_nested_func`
    over them with a multiprocessing Pool, then flatten the per-process results."""
    # Never spawn more processes than there are items to process.
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        # The first `mod` slices get one extra element so all items are covered.
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    # Sanity check: the slices must partition the input exactly.
    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        # Share tqdm's lock with the workers so progress bars don't interleave.
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    # Flatten the per-process result lists into a single list.
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map `single_map_nested_func` over `iterable` using the joblib backend
    registered in ParallelBackendConfig (progress bar is not supported here)."""
    # joblib is an optional dependency, so import lazily.
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Context manager that routes `parallel_map` through the given joblib
    backend for the duration of the `with` block."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        # Always restore the default backend, even if the block raised.
        ParallelBackendConfig.backend_name = None
29
0
from typing import List, Optional, Union import torch from transformers import ( XLMRobertaTokenizer, ) from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) from .text_encoder import MultilingualCLIP _a = logging.get_logger(__name__) # pylint: disable=invalid-name _a = """ Examples: ```py >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline >>> import torch >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\") >>> pipe_prior.to(\"cuda\") >>> prompt = \"red cat, 4k photo\" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> negative_image_emb = out.negative_image_embeds >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\") >>> pipe.to(\"cuda\") >>> image = pipe( ... prompt, ... image_embeds=image_emb, ... negative_image_embeds=negative_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... 
).images >>> image[0].save(\"cat.png\") ``` """ def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=8 ) -> Dict: '''simple docstring''' lowerCamelCase__ = h // scale_factor**2 if h % scale_factor**2 != 0: new_h += 1 lowerCamelCase__ = w // scale_factor**2 if w % scale_factor**2 != 0: new_w += 1 return new_h * scale_factor, new_w * scale_factor class __A ( __snake_case ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ): '''simple docstring''' super().__init__() self.register_modules( text_encoder=A_ , tokenizer=A_ , unet=A_ , scheduler=A_ , movq=A_ , ) lowerCamelCase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' if latents is None: lowerCamelCase__ = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ ) else: if latents.shape != shape: raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' ) lowerCamelCase__ = latents.to(A_ ) lowerCamelCase__ = latents * scheduler.init_noise_sigma return latents def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , ): '''simple docstring''' lowerCamelCase__ = len(A_ ) if isinstance(A_ , A_ ) else 1 # get prompt text embeddings lowerCamelCase__ = self.tokenizer( A_ , padding='''max_length''' , truncation=A_ , max_length=7_7 , return_attention_mask=A_ , add_special_tokens=A_ , return_tensors='''pt''' , ) lowerCamelCase__ = text_inputs.input_ids lowerCamelCase__ = self.tokenizer(A_ , padding='''longest''' , return_tensors='''pt''' ).input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(A_ , A_ ): lowerCamelCase__ = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( 
'''The following part of your input was truncated because CLIP can only handle sequences up to''' F' {self.tokenizer.model_max_length} tokens: {removed_text}' ) lowerCamelCase__ = text_input_ids.to(A_ ) lowerCamelCase__ = text_inputs.attention_mask.to(A_ ) lowerCamelCase__ = self.text_encoder( input_ids=A_ , attention_mask=A_ ) lowerCamelCase__ = prompt_embeds.repeat_interleave(A_ , dim=0 ) lowerCamelCase__ = text_encoder_hidden_states.repeat_interleave(A_ , dim=0 ) lowerCamelCase__ = text_mask.repeat_interleave(A_ , dim=0 ) if do_classifier_free_guidance: lowerCamelCase__ = 42 if negative_prompt is None: lowerCamelCase__ = [""] * batch_size elif type(A_ ) is not type(A_ ): raise TypeError( F'`negative_prompt` should be the same type to `prompt`, but got {type(A_ )} !=' F' {type(A_ )}.' ) elif isinstance(A_ , A_ ): lowerCamelCase__ = [negative_prompt] elif batch_size != len(A_ ): raise ValueError( F'`negative_prompt`: {negative_prompt} has batch size {len(A_ )}, but `prompt`:' F' {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches' ''' the batch size of `prompt`.''' ) else: lowerCamelCase__ = negative_prompt lowerCamelCase__ = self.tokenizer( A_ , padding='''max_length''' , max_length=7_7 , truncation=A_ , return_attention_mask=A_ , add_special_tokens=A_ , return_tensors='''pt''' , ) lowerCamelCase__ = uncond_input.input_ids.to(A_ ) lowerCamelCase__ = uncond_input.attention_mask.to(A_ ) lowerCamelCase__ = self.text_encoder( input_ids=A_ , attention_mask=A_ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method lowerCamelCase__ = negative_prompt_embeds.shape[1] lowerCamelCase__ = negative_prompt_embeds.repeat(1 , A_ ) lowerCamelCase__ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ ) lowerCamelCase__ = uncond_text_encoder_hidden_states.shape[1] lowerCamelCase__ = uncond_text_encoder_hidden_states.repeat(1 , A_ , 1 ) lowerCamelCase__ = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt , A_ , -1 ) lowerCamelCase__ = uncond_text_mask.repeat_interleave(A_ , dim=0 ) # done duplicates # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowerCamelCase__ = torch.cat([negative_prompt_embeds, prompt_embeds] ) lowerCamelCase__ = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] ) lowerCamelCase__ = torch.cat([uncond_text_mask, text_mask] ) return prompt_embeds, text_encoder_hidden_states, text_mask def __lowerCamelCase ( self , __lowerCAmelCase=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) lowerCamelCase__ = torch.device(F'cuda:{gpu_id}' ) lowerCamelCase__ = [ self.unet, self.text_encoder, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(A_ , A_ ) def __lowerCamelCase ( self , __lowerCAmelCase=0 ): '''simple docstring''' if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) lowerCamelCase__ = torch.device(F'cuda:{gpu_id}' ) if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=A_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) lowerCamelCase__ = None for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]: lowerCamelCase__ = cpu_offload_with_hook(A_ , A_ , prev_module_hook=A_ ) if self.safety_checker is not None: lowerCamelCase__ = cpu_offload_with_hook(self.safety_checker , A_ , prev_module_hook=A_ ) # We'll offload the last model manually. 
lowerCamelCase__ = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __lowerCamelCase ( self ): '''simple docstring''' if not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(A_ , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(A_ ) def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 5_1_2 , __lowerCAmelCase = 5_1_2 , __lowerCAmelCase = 1_0_0 , __lowerCAmelCase = 4.0 , __lowerCAmelCase = 1 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "pil" , __lowerCAmelCase = True , ): '''simple docstring''' if isinstance(A_ , A_ ): lowerCamelCase__ = 1 elif isinstance(A_ , A_ ): lowerCamelCase__ = len(A_ ) else: raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(A_ )}' ) lowerCamelCase__ = self._execution_device lowerCamelCase__ = batch_size * num_images_per_prompt lowerCamelCase__ = guidance_scale > 1.0 lowerCamelCase__ = self._encode_prompt( A_ , A_ , A_ , A_ , A_ ) if isinstance(A_ , A_ ): lowerCamelCase__ = torch.cat(A_ , dim=0 ) if isinstance(A_ , A_ ): lowerCamelCase__ = torch.cat(A_ , dim=0 ) if do_classifier_free_guidance: lowerCamelCase__ = image_embeds.repeat_interleave(A_ , dim=0 ) lowerCamelCase__ = negative_image_embeds.repeat_interleave(A_ , dim=0 ) lowerCamelCase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to( dtype=prompt_embeds.dtype , device=A_ ) self.scheduler.set_timesteps(A_ , device=A_ ) lowerCamelCase__ = self.scheduler.timesteps lowerCamelCase__ = self.unet.config.in_channels lowerCamelCase__ = get_new_h_w(A_ , A_ , self.movq_scale_factor ) # create initial latent lowerCamelCase__ = 
self.prepare_latents( (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , A_ , A_ , A_ , self.scheduler , ) for i, t in enumerate(self.progress_bar(A_ ) ): # expand the latents if we are doing classifier free guidance lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowerCamelCase__ = {"text_embeds": prompt_embeds, "image_embeds": image_embeds} lowerCamelCase__ = self.unet( sample=A_ , timestep=A_ , encoder_hidden_states=A_ , added_cond_kwargs=A_ , return_dict=A_ , )[0] if do_classifier_free_guidance: lowerCamelCase__ = noise_pred.split(latents.shape[1] , dim=1 ) lowerCamelCase__ = noise_pred.chunk(2 ) lowerCamelCase__ = variance_pred.chunk(2 ) lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) lowerCamelCase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , '''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): lowerCamelCase__ = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 lowerCamelCase__ = self.scheduler.step( A_ , A_ , A_ , generator=A_ , ).prev_sample # post-processing lowerCamelCase__ = self.movq.decode(A_ , force_not_quantize=A_ )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' ) if output_type in ["np", "pil"]: lowerCamelCase__ = image * 0.5 + 0.5 lowerCamelCase__ = image.clamp(0 , 1 ) lowerCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": lowerCamelCase__ = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ )
715
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of TransformerTemporalModel.

    Attributes:
        sample: the denoised hidden states, same shape as the model input.
    """

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """A transformer that attends over the temporal (frame) axis of a video latent.

    The forward pass reshapes a (batch*frames, channel, height, width) tensor so
    that each spatial location becomes a sequence of `num_frames` tokens, runs the
    transformer blocks over that sequence, and adds the result back residually.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 1_6,
        attention_head_dim: int = 8_8,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 3_2,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        # 1. Normalize and project the per-frame channels into the transformer width.
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1E-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Run temporal attention over `hidden_states`.

        Args:
            hidden_states: tensor of shape (batch*num_frames, channel, height, width).
            return_dict: when False, return a plain tuple instead of the dataclass.
        """
        # 1. Input: fold the frame axis out of the batch dimension.
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        # Each (h, w) position becomes one sequence of `num_frames` tokens.
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output: undo the reshape and add the residual connection.
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)


# Backward-compatible alias for the previous (obfuscated) class name.
__A = TransformerTemporalModel
29
0
import argparse

import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint into a PyTorch ``state_dict`` file.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        rembert_config_file: JSON config describing the pre-trained model.
        pytorch_dump_path: where to write the converted PyTorch weights.
    """
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
716
# Doc-building configuration: the install cell prepended to generated notebooks
# (Korean locale) and the placeholder patterns that `black` formatting must skip.
INSTALL_CONTENT = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

# First cells injected into every converted notebook.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Template placeholders that must not be reformatted as code.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
29
0
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __A ( unittest.TestCase ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=3 , __lowerCAmelCase=1_8 , __lowerCAmelCase=3_0 , __lowerCAmelCase=4_0_0 , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=True , ): '''simple docstring''' lowerCamelCase__ = size if size is not None else {"height": 1_8, "width": 1_8} lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = num_channels lowerCamelCase__ = image_size lowerCamelCase__ = min_resolution lowerCamelCase__ = max_resolution lowerCamelCase__ = do_resize lowerCamelCase__ = size lowerCamelCase__ = apply_ocr def __lowerCamelCase ( self ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __A ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = LayoutLMvaImageProcessingTester(self ) @property def __lowerCamelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase__ , '''do_resize''' ) ) self.assertTrue(hasattr(lowerCamelCase__ , '''size''' ) ) self.assertTrue(hasattr(lowerCamelCase__ , '''apply_ocr''' ) ) def __lowerCamelCase ( self ): 
'''simple docstring''' lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8} ) lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , Image.Image ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) self.assertIsInstance(encoding.words , lowerCamelCase__ ) self.assertIsInstance(encoding.boxes , lowerCamelCase__ ) # Test batched lowerCamelCase__ = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , np.ndarray ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] , return_tensors='''pt''' 
).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched lowerCamelCase__ = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , torch.Tensor ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched lowerCamelCase__ = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = LayoutLMvaImageProcessor() from datasets import load_dataset lowerCamelCase__ = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' ) lowerCamelCase__ = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) lowerCamelCase__ = image_processing(lowerCamelCase__ , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 
2_2_4, 2_2_4) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 lowerCamelCase__ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231 lowerCamelCase__ = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 
2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 
5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], 
[5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , lowerCamelCase__ ) self.assertListEqual(encoding.boxes , lowerCamelCase__ ) # with apply_OCR = False lowerCamelCase__ = LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) lowerCamelCase__ = image_processing(lowerCamelCase__ , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
717
# Lazy-import module for the GPT-Neo model family.  Heavy submodules (torch /
# flax implementations) are imported only when one of their symbols is
# actually accessed, via transformers' `_LazyModule` machinery.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Maps submodule name -> public symbols it exports.  Optional-backend entries
# are appended below only when the backend is importable, so `import` of this
# package never fails just because torch/flax is absent.
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below resolves them on first attribute access instead.
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    # Replace this module object with a lazy proxy; without this assignment
    # (and a defined `_import_structure`) the file previously did nothing.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
29
0
"""TrOCR decoder configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    """Configuration for the TrOCR text decoder.

    Stores the hyper-parameters of the Transformer decoder used on top of a
    vision encoder; defaults mirror microsoft/trocr-base.
    """

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Generic attribute names -> TrOCR-specific names.
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        # Token ids must be forwarded to the base class so generation and
        # padding logic pick them up.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
718
"""Deprecated feature-extractor alias for OWL-ViT."""
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    """Backward-compatibility shim: behaves exactly like OwlViTImageProcessor
    but emits a deprecation warning on construction."""

    def __init__(self, *args, **kwargs) -> None:
        # FutureWarning so downstream users see it by default before v5 removal.
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
29
0
"""Deprecated helpers for downloading and reading the MNIST dataset."""
import collections
import gzip
import os
import urllib

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated


_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    """Read one big-endian uint32 from the stream (IDX header field)."""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract images from a gzipped IDX file into a (num, rows, cols, 1) uint8 array.

    Raises ValueError if the file's magic number is not 2051 (IDX images).
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        return data.reshape(num_images, rows, cols, 1)


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class-index labels to a (num_labels, num_classes) one-hot matrix."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    # Scatter a 1 into each row at the label's column.
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract labels from a gzipped IDX file into a 1-D uint8 array.

    Raises ValueError if the file's magic number is not 2049 (IDX labels).
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
    """Container for one MNIST split with epoch-aware mini-batching."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        dtype must be uint8 (raw [0, 255] pixels) or float32 (rescaled to
        [0, 1]).  `seed` controls shuffling; graph-level seed is used as a
        fallback when it is None.
        """
        seeda, seedb = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seedb)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` (images, labels) from this split.

        Shuffles at the start of the first epoch and (optionally) at every
        epoch boundary; a batch that straddles an epoch boundary is stitched
        from the tail of one epoch and the head of the next.
        """
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perma = numpy.arange(self._num_examples)
            numpy.random.shuffle(perma)
            self._images = self.images[perma]
            self._labels = self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download `filename` from `source_url` into `work_directory` if absent.

    Returns the local file path either way.
    """
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    """Download (if needed) and load MNIST into train/validation/test splits."""
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
719
# Usage:
# ./gen-card-allenai-wmt16.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    """Render and write the README.md model card for one allenai wmt16 model.

    model_card_dir: target directory (created if missing).
    src_lang / tgt_lang: ISO language codes used in the card front matter.
    model_name: one of the keys in `scores` below.
    """
    # Sample sentence per language, interpolated into the usage snippet.
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'

    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
29
0
from __future__ import annotations import math def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Dict: '''simple docstring''' if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(_UpperCamelCase ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 ,node_index * 2 ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) ,minimax(depth + 1 ,node_index * 2 + 1 ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) ,) return min( minimax(depth + 1 ,node_index * 2 ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) ,minimax(depth + 1 ,node_index * 2 + 1 ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) ,) def lowerCAmelCase__() -> str: '''simple docstring''' lowerCamelCase__ = [90, 23, 6, 33, 21, 65, 123, 34423] lowerCamelCase__ = math.log(len(_UpperCamelCase ) ,2 ) print('''Optimal value : ''' ,end='''''' ) print(minimax(0 ,0 ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
720
"""Deprecated feature-extractor alias for SegFormer."""
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    """Backward-compatibility shim: behaves exactly like SegformerImageProcessor
    but emits a deprecation warning on construction."""

    def __init__(self, *args, **kwargs) -> None:
        # FutureWarning so downstream users see it by default before v5 removal.
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
29
0
"""Funnel Transformer configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
    "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
    "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
    "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
    "funnel-transformer/intermediate": (
        "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
    ),
    "funnel-transformer/intermediate-base": (
        "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
    ),
    "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
    "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
    "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
    "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}


class FunnelConfig(PretrainedConfig):
    """Configuration for Funnel Transformer models.

    The encoder is organised in blocks (`block_sizes`, optionally repeated via
    `block_repeats`); `num_hidden_layers` / `num_blocks` are derived, read-only
    properties.
    """

    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],  # noqa: B006 — never mutated, kept for API parity
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        # Default: each block appears once.
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        # Derived from block structure; not independently settable.
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
721
from queue import PriorityQueue from typing import Any import numpy as np def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) -> float | int: '''simple docstring''' for nxt, d in graph[v]: if nxt in visited_forward: continue lowerCamelCase__ = cst_fwd.get(__snake_case ,np.inf ) lowerCamelCase__ = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) lowerCamelCase__ = new_cost_f lowerCamelCase__ = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: lowerCamelCase__ = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> int: '''simple docstring''' lowerCamelCase__ = -1 lowerCamelCase__ = set() lowerCamelCase__ = set() lowerCamelCase__ = {source: 0} lowerCamelCase__ = {destination: 0} lowerCamelCase__ = {source: None} lowerCamelCase__ = {destination: None} lowerCamelCase__ = PriorityQueue() lowerCamelCase__ = PriorityQueue() lowerCamelCase__ = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): lowerCamelCase__ , lowerCamelCase__ = queue_forward.get() visited_forward.add(__snake_case ) lowerCamelCase__ , lowerCamelCase__ = queue_backward.get() visited_backward.add(__snake_case ) lowerCamelCase__ = pass_and_relaxation( __snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) lowerCamelCase__ = pass_and_relaxation( __snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: lowerCamelCase__ = shortest_distance return shortest_path_distance _a = { "B": [["C", 1]], "C": [["D", 1]], "D": [["F", 
1]], "E": [["B", 1], ["G", 2]], "F": [], "G": [["F", 1]], } _a = { "B": [["E", 1]], "C": [["B", 1]], "D": [["C", 1]], "F": [["D", 1], ["G", 1]], "E": [[None, np.inf]], "G": [["E", 2]], } if __name__ == "__main__": import doctest doctest.testmod()
29
0
"""Day-of-week lookup via Conway's Doomsday algorithm."""

# Per-month doomsday anchor days, leap vs common years (index 0 = January).
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Return the weekday name of the given Gregorian date.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    >>> get_week_day(2017, 10, 24)
    'Tuesday'
    """
    # Doomsday algorithm:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = ((centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor) % 7
    # Gregorian leap rule: divisible by 4, except century years not divisible
    # by 400 (1900 is common, 2000 is leap).  The previous condition got both
    # century cases inverted.
    is_leap_year = year % 4 == 0 and (centurian != 0 or year % 400 == 0)
    day_anchor = DOOMSDAY_LEAP[month - 1] if is_leap_year else DOOMSDAY_NOT_LEAP[month - 1]
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
700
"""Processor combining CLAP's feature extractor and RoBERTa tokenizer."""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    """Wraps a ClapFeatureExtractor and a Roberta tokenizer into one object
    offering a single `__call__` for text and/or audio inputs."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or extract features from `audios`.

        Returns the tokenizer encoding (with `input_features` merged in when
        audio is also given), or a BatchEncoding of audio features alone.
        Raises ValueError when neither input is provided.
        """
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            # Merge audio features into the text encoding.
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Preserve order, drop duplicates across the two sub-components.
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
29
0
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)

# Payload written into every temporary fixture file.
FILE_CONTENT = "\\n Text data.\n Second line of data."
# Base name (without extension) shared by the fixture files.
FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    """Session-scoped zstd-compressed copy of FILE_CONTENT."""
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    """Write FILE_CONTENT into the mock `tmp://` filesystem; return its path."""
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    """cached_path should transparently decompress every supported format."""
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    """Extraction directory should honour both default and monkeypatched locations."""
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
701
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class __A : '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , __lowerCAmelCase=0 , ): '''simple docstring''' lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = seq_length lowerCamelCase__ = is_training lowerCamelCase__ = use_input_mask lowerCamelCase__ = use_token_type_ids lowerCamelCase__ = use_labels lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = type_vocab_size lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = num_labels lowerCamelCase__ = 
num_choices lowerCamelCase__ = scope lowerCamelCase__ = projection_dim def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ = None if self.use_input_mask: # follow test_modeling_tf_ctrl.py lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ = None if self.use_token_type_ids: lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) lowerCamelCase__ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFDPRContextEncoder(config=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase 
) lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFDPRQuestionEncoder(config=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFDPRReader(config=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = config_and_inputs lowerCamelCase__ = {'''input_ids''': input_ids} return config, inputs_dict @require_tf class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) lowerCAmelCase_ = {"""feature-extraction""": TFDPRQuestionEncoder} if 
is_tf_available() else {} lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFDPRModelTester(self ) lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*__lowerCAmelCase ) @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRQuestionEncoder.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRReader.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class __A ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = 
TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' ) lowerCamelCase__ = tf.constant( [[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP] lowerCamelCase__ = model(__lowerCAmelCase )[0] # embedding shape = (1, 768) # compare the actual values for a slice. lowerCamelCase__ = tf.constant( [ [ 0.0323_6253, 0.1275_3335, 0.1681_8509, 0.0027_9786, 0.389_6933, 0.2426_4945, 0.217_8971, -0.0233_5227, -0.0848_1959, -0.1432_4117, ] ] ) self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
29
0
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    """A pile of cards, ordered by its top (last) element only."""

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort `collection` in place with patience sort and return it.

    Elements are dealt onto piles (binary search picks the pile), then the
    piles are merged with a heap-based k-way merge.

    >>> patience_sort([1, 9, 5, 21, 17, 6])
    [1, 5, 6, 9, 17, 21]
    """
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge the stacks efficiently;
    # assign back through the slice so the input list is sorted in place
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
702
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count case-insensitive occurrences of `term` as a whole word in `document`."""
    # strip punctuation and newlines, then tokenize on single spaces
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of newline-separated documents containing `term`, total documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = log10(n / df), rounded to 3 places; optionally smoothed as 1 + log10(n / (1 + df))."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Return the tf-idf score, rounded to 3 decimal places."""
    return round(tf * idf, 3)
29
0
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


# keep matmuls deterministic for the numerical comparison below
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        """Run the pretrained pipeline once and compare a pixel slice to known values."""
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
703
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: submodule name -> public names it defines.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # replace this module with a lazy loader so heavy deps import on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
29
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure: submodule name -> public names it defines.
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    # replace this module with a lazy loader so heavy deps import on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
704
import os
from collections import namedtuple

import pytest

from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict


# Mirrors the CLI arguments of `datasets-cli test` for driving TestCommand directly.
_TestCommandArgs = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
    ],
    defaults=[None, None, None, False, False, False, False, False],
)


def is_apercent_close(source, target):
    """True when `source` is within 1% of `target` (relative difference)."""
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    """Run `TestCommand` on a loading script and check the generated dataset infos."""
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            # byte counts may drift slightly between runs; allow 1% tolerance
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            # exact comparison for all remaining YAML-included fields
            assert result == expected
29
0
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) byte-level BPE tokenizer for Blenderbot."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # sync the backend pre-tokenizer's add_prefix_space with the requested value
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        """The mask token, or None (with a logged error) when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # make the mask token behave like a normal word: include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None):
        """Blenderbot does not use token type ids; return a zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None):
        # Blenderbot sequences simply end with EOS; a second sequence is ignored
        return token_ids_a + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
705
from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class __A : '''simple docstring''' def __init__( self , __lowerCAmelCase , ): '''simple docstring''' lowerCamelCase__ = parent lowerCamelCase__ = 1_3 lowerCamelCase__ = 7 lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = 9_9 lowerCamelCase__ = 3_2 lowerCamelCase__ = 2 lowerCamelCase__ = 4 lowerCamelCase__ = 3_7 lowerCamelCase__ = '''gelu''' lowerCamelCase__ = 0.1 lowerCamelCase__ = 0.1 lowerCamelCase__ = 5_1_2 lowerCamelCase__ = 1_6 lowerCamelCase__ = 2 lowerCamelCase__ = 0.02 lowerCamelCase__ = 3 lowerCamelCase__ = 4 lowerCamelCase__ = None def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ = None if self.use_input_mask: lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self ): '''simple docstring''' ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = self.prepare_config_and_inputs() lowerCamelCase__ = True lowerCamelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase ) lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ = [input_ids, input_mask] lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ): '''simple docstring''' lowerCamelCase__ = True lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase ) lowerCamelCase__ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''encoder_hidden_states''': encoder_hidden_states, 
'''encoder_attention_mask''': encoder_attention_mask, } lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ = [input_ids, input_mask] lowerCamelCase__ = model(__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase ) # Also check the case where encoder outputs are not passed lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFEsmForMaskedLM(config=__lowerCAmelCase ) lowerCamelCase__ = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.num_labels lowerCamelCase__ = TFEsmForTokenClassification(config=__lowerCAmelCase ) lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = config_and_inputs lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) 
lowerCAmelCase_ = ( { """feature-extraction""": TFEsmModel, """fill-mask""": TFEsmForMaskedLM, """text-classification""": TFEsmForSequenceClassification, """token-classification""": TFEsmForTokenClassification, """zero-shot""": TFEsmForSequenceClassification, } if is_tf_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFEsmModelTester(self ) lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFEsmModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @unittest.skip('''Protein models do not support embedding resizing.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''Protein models do not support embedding resizing.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(__lowerCAmelCase ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer lowerCamelCase__ = model.get_bias() assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) for k, v in name.items(): assert isinstance(__lowerCAmelCase , tf.Variable ) else: lowerCamelCase__ = model.get_output_embeddings() assert x is None lowerCamelCase__ = model.get_bias() assert name is None @require_tf class __A ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) lowerCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase__ = model(__lowerCAmelCase )[0] lowerCamelCase__ = [1, 6, 3_3] self.assertEqual(list(output.numpy().shape ) , __lowerCAmelCase ) # compare the actual values for a slice. lowerCamelCase__ = tf.constant( [ [ [8.92_1518, -10.58_9814, -6.467_1307], [-6.396_7156, -13.91_1377, -1.121_1915], [-7.78_1247, -13.95_1557, -3.74_0592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) lowerCamelCase__ = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] ) lowerCamelCase__ = model(__lowerCAmelCase )[0] # compare the actual values for a slice. lowerCamelCase__ = tf.constant( [ [ [0.1444_3092, 0.5412_5327, 0.324_7739], [0.3034_0484, 0.0052_6676, 0.3107_7722], [0.3227_8043, -0.2498_7096, 0.341_4628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
29
0
import os def lowerCAmelCase__() -> List[str]: '''simple docstring''' with open(os.path.dirname(lowercase_ ) + '''/p022_names.txt''' ) as file: lowerCamelCase__ = str(file.readlines()[0] ) lowerCamelCase__ = names.replace('''\"''' ,'''''' ).split(''',''' ) names.sort() lowerCamelCase__ = 0 lowerCamelCase__ = 0 for i, name in enumerate(lowercase_ ): for letter in name: name_score += ord(lowercase_ ) - 64 total_score += (i + 1) * name_score lowerCamelCase__ = 0 return total_score if __name__ == "__main__": print(solution())
706
from math import sqrt def lowerCAmelCase__(__snake_case ) -> bool: '''simple docstring''' assert isinstance(__snake_case ,__snake_case ) and ( number >= 0 ), "'number' must been an int and positive" lowerCamelCase__ = True # 0 and 1 are none primes. if number <= 1: lowerCamelCase__ = False for divisor in range(2 ,int(round(sqrt(__snake_case ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: lowerCamelCase__ = False break # precondition assert isinstance(__snake_case ,__snake_case ), "'status' must been from type bool" return status def lowerCAmelCase__(__snake_case ) -> Any: '''simple docstring''' assert isinstance(__snake_case ,__snake_case ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N lowerCamelCase__ = list(range(2 ,n + 1 ) ) lowerCamelCase__ = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(__snake_case ) ): for j in range(i + 1 ,len(__snake_case ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): lowerCamelCase__ = 0 # filters actual prime numbers. 
lowerCamelCase__ = [x for x in begin_list if x != 0] # precondition assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type list" return ans def lowerCAmelCase__(__snake_case ) -> Optional[Any]: '''simple docstring''' assert isinstance(__snake_case ,__snake_case ) and (n > 2), "'N' must been an int and > 2" lowerCamelCase__ = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 ,n + 1 ): if is_prime(__snake_case ): ans.append(__snake_case ) # precondition assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type list" return ans def lowerCAmelCase__(__snake_case ) -> List[str]: '''simple docstring''' assert isinstance(__snake_case ,__snake_case ) and number >= 0, "'number' must been an int and >= 0" lowerCamelCase__ = [] # this list will be returns of the function. # potential prime number factors. lowerCamelCase__ = 2 lowerCamelCase__ = number if number == 0 or number == 1: ans.append(__snake_case ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(__snake_case ): while quotient != 1: if is_prime(__snake_case ) and (quotient % factor == 0): ans.append(__snake_case ) quotient /= factor else: factor += 1 else: ans.append(__snake_case ) # precondition assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type list" return ans def lowerCAmelCase__(__snake_case ) -> List[Any]: '''simple docstring''' assert isinstance(__snake_case ,__snake_case ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowerCamelCase__ = 0 # prime factorization of 'number' lowerCamelCase__ = prime_factorization(__snake_case ) lowerCamelCase__ = max(__snake_case ) # precondition assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type int" return ans def lowerCAmelCase__(__snake_case ) -> Dict: '''simple docstring''' assert isinstance(__snake_case ,__snake_case ) and ( number >= 0 ), "'number' bust been an int 
and >= 0" lowerCamelCase__ = 0 # prime factorization of 'number' lowerCamelCase__ = prime_factorization(__snake_case ) lowerCamelCase__ = min(__snake_case ) # precondition assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type int" return ans def lowerCAmelCase__(__snake_case ) -> List[Any]: '''simple docstring''' assert isinstance(__snake_case ,__snake_case ), "'number' must been an int" assert isinstance(number % 2 == 0 ,__snake_case ), "compare bust been from type bool" return number % 2 == 0 def lowerCAmelCase__(__snake_case ) -> List[str]: '''simple docstring''' assert isinstance(__snake_case ,__snake_case ), "'number' must been an int" assert isinstance(number % 2 != 0 ,__snake_case ), "compare bust been from type bool" return number % 2 != 0 def lowerCAmelCase__(__snake_case ) -> List[Any]: '''simple docstring''' assert ( isinstance(__snake_case ,__snake_case ) and (number > 2) and is_even(__snake_case ) ), "'number' must been an int, even and > 2" lowerCamelCase__ = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' lowerCamelCase__ = get_prime_numbers(__snake_case ) lowerCamelCase__ = len(__snake_case ) # run variable for while-loops. lowerCamelCase__ = 0 lowerCamelCase__ = None # exit variable. for break up the loops lowerCamelCase__ = True while i < len_pn and loop: lowerCamelCase__ = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: lowerCamelCase__ = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(__snake_case ,__snake_case ) and (len(__snake_case ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. 
And sum of elements must been eq 'number'" return ans def lowerCAmelCase__(__snake_case ,__snake_case ) -> str: '''simple docstring''' assert ( isinstance(__snake_case ,__snake_case ) and isinstance(__snake_case ,__snake_case ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." lowerCamelCase__ = 0 while numbera != 0: lowerCamelCase__ = numbera % numbera lowerCamelCase__ = numbera lowerCamelCase__ = rest # precondition assert isinstance(__snake_case ,__snake_case ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def lowerCAmelCase__(__snake_case ,__snake_case ) -> Any: '''simple docstring''' assert ( isinstance(__snake_case ,__snake_case ) and isinstance(__snake_case ,__snake_case ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." lowerCamelCase__ = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' lowerCamelCase__ = prime_factorization(__snake_case ) lowerCamelCase__ = prime_factorization(__snake_case ) elif numbera == 1 or numbera == 1: lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = max(__snake_case ,__snake_case ) lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: lowerCamelCase__ = prime_fac_a.count(__snake_case ) lowerCamelCase__ = prime_fac_a.count(__snake_case ) for _ in range(max(__snake_case ,__snake_case ) ): ans *= n else: lowerCamelCase__ = prime_fac_a.count(__snake_case ) for _ in range(__snake_case ): ans *= n done.append(__snake_case ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: lowerCamelCase__ = prime_fac_a.count(__snake_case ) for _ in range(__snake_case ): ans *= n done.append(__snake_case ) # precondition assert 
isinstance(__snake_case ,__snake_case ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def lowerCAmelCase__(__snake_case ) -> Union[str, Any]: '''simple docstring''' assert isinstance(__snake_case ,__snake_case ) and (n >= 0), "'number' must been a positive int" lowerCamelCase__ = 0 lowerCamelCase__ = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(__snake_case ): ans += 1 # precondition assert isinstance(__snake_case ,__snake_case ) and is_prime( __snake_case ), "'ans' must been a prime number and from type int" return ans def lowerCAmelCase__(__snake_case ,__snake_case ) -> Dict: '''simple docstring''' assert ( is_prime(__snake_case ) and is_prime(__snake_case ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" lowerCamelCase__ = p_number_a + 1 # jump to the next number lowerCamelCase__ = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(__snake_case ): number += 1 while number < p_number_a: ans.append(__snake_case ) number += 1 # fetch the next prime number. while not is_prime(__snake_case ): number += 1 # precondition assert ( isinstance(__snake_case ,__snake_case ) and ans[0] != p_number_a and ans[len(__snake_case ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def lowerCAmelCase__(__snake_case ) -> Tuple: '''simple docstring''' assert isinstance(__snake_case ,__snake_case ) and (n >= 1), "'n' must been int and >= 1" lowerCamelCase__ = [] # will be returned. 
for divisor in range(1 ,n + 1 ): if n % divisor == 0: ans.append(__snake_case ) # precondition assert ans[0] == 1 and ans[len(__snake_case ) - 1] == n, "Error in function getDivisiors(...)" return ans def lowerCAmelCase__(__snake_case ) -> Optional[Any]: '''simple docstring''' assert isinstance(__snake_case ,__snake_case ) and ( number > 1 ), "'number' must been an int and >= 1" lowerCamelCase__ = get_divisors(__snake_case ) # precondition assert ( isinstance(__snake_case ,__snake_case ) and (divisors[0] == 1) and (divisors[len(__snake_case ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def lowerCAmelCase__(__snake_case ,__snake_case ) -> Tuple: '''simple docstring''' assert ( isinstance(__snake_case ,__snake_case ) and isinstance(__snake_case ,__snake_case ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. lowerCamelCase__ = gcd(abs(__snake_case ) ,abs(__snake_case ) ) # precondition assert ( isinstance(__snake_case ,__snake_case ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def lowerCAmelCase__(__snake_case ) -> Optional[int]: '''simple docstring''' assert isinstance(__snake_case ,__snake_case ) and (n >= 0), "'n' must been a int and >= 0" lowerCamelCase__ = 1 # this will be return. for factor in range(1 ,n + 1 ): ans *= factor return ans def lowerCAmelCase__(__snake_case ) -> Optional[Any]: '''simple docstring''' assert isinstance(__snake_case ,__snake_case ) and (n >= 0), "'n' must been an int and >= 0" lowerCamelCase__ = 0 lowerCamelCase__ = 1 lowerCamelCase__ = 1 # this will be return for _ in range(n - 1 ): lowerCamelCase__ = ans ans += fiba lowerCamelCase__ = tmp return ans
29
0
import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() _a = logging.get_logger("transformers.models.encodec") _a = { "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited", "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size", "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed", "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg", } _a = { "encoder.model.0.conv.conv": "encoder.layers.0.conv", "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv", "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv", "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv", "encoder.model.3.conv.conv": "encoder.layers.3.conv", "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv", "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv", "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv", "encoder.model.6.conv.conv": "encoder.layers.6.conv", "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv", "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv", "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv", "encoder.model.9.conv.conv": "encoder.layers.9.conv", "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv", "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv", "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv", "encoder.model.12.conv.conv": "encoder.layers.12.conv", "encoder.model.13.lstm": "encoder.layers.13.lstm", 
"encoder.model.15.conv.conv": "encoder.layers.15.conv", } _a = { "encoder.model.0.conv.norm": "encoder.layers.0.norm", "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm", "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm", "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm", "encoder.model.3.conv.norm": "encoder.layers.3.norm", "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm", "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm", "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm", "encoder.model.6.conv.norm": "encoder.layers.6.norm", "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm", "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm", "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm", "encoder.model.9.conv.norm": "encoder.layers.9.norm", "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm", "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm", "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm", "encoder.model.12.conv.norm": "encoder.layers.12.norm", "encoder.model.15.conv.norm": "encoder.layers.15.norm", } _a = { "decoder.model.0.conv.conv": "decoder.layers.0.conv", "decoder.model.1.lstm": "decoder.layers.1.lstm", "decoder.model.3.convtr.convtr": "decoder.layers.3.conv", "decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv", "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv", "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv", "decoder.model.6.convtr.convtr": "decoder.layers.6.conv", "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv", "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv", "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv", "decoder.model.9.convtr.convtr": "decoder.layers.9.conv", "decoder.model.10.block.1.conv.conv": 
"decoder.layers.10.block.1.conv", "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv", "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv", "decoder.model.12.convtr.convtr": "decoder.layers.12.conv", "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv", "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv", "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv", "decoder.model.15.conv.conv": "decoder.layers.15.conv", } _a = { "decoder.model.0.conv.norm": "decoder.layers.0.norm", "decoder.model.3.convtr.norm": "decoder.layers.3.norm", "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm", "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm", "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm", "decoder.model.6.convtr.norm": "decoder.layers.6.norm", "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm", "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm", "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm", "decoder.model.9.convtr.norm": "decoder.layers.9.norm", "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm", "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm", "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm", "decoder.model.12.convtr.norm": "decoder.layers.12.norm", "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm", "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm", "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm", "decoder.model.15.conv.norm": "decoder.layers.15.norm", } _a = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } _a = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } _a = [] _a = [] def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case 
,__snake_case ) -> Any: '''simple docstring''' for attribute in key.split('''.''' ): lowerCamelCase__ = getattr(__UpperCamelCase ,__UpperCamelCase ) if weight_type is not None: lowerCamelCase__ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape else: lowerCamelCase__ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": lowerCamelCase__ = value elif weight_type == "weight_g": lowerCamelCase__ = value elif weight_type == "weight_v": lowerCamelCase__ = value elif weight_type == "bias": lowerCamelCase__ = value elif weight_type == "running_mean": lowerCamelCase__ = value elif weight_type == "running_var": lowerCamelCase__ = value elif weight_type == "num_batches_tracked": lowerCamelCase__ = value elif weight_type == "weight_ih_l0": lowerCamelCase__ = value elif weight_type == "weight_hh_l0": lowerCamelCase__ = value elif weight_type == "bias_ih_l0": lowerCamelCase__ = value elif weight_type == "bias_hh_l0": lowerCamelCase__ = value elif weight_type == "weight_ih_l1": lowerCamelCase__ = value elif weight_type == "weight_hh_l1": lowerCamelCase__ = value elif weight_type == "bias_ih_l1": lowerCamelCase__ = value elif weight_type == "bias_hh_l1": lowerCamelCase__ = value else: lowerCamelCase__ = value logger.info(F'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' ) def lowerCAmelCase__(__snake_case ,__snake_case ) -> Union[str, Any]: '''simple docstring''' for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." 
in key: lowerCamelCase__ , lowerCamelCase__ = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> Any: '''simple docstring''' lowerCamelCase__ = [] if model_name == "encodec_24khz" or "encodec_32khz": lowerCamelCase__ = MAPPING_24K elif model_name == "encodec_48khz": lowerCamelCase__ = MAPPING_48K else: raise ValueError(F'Unsupported model: {model_name}' ) for name, value in orig_dict.items(): if should_ignore(__UpperCamelCase ,__UpperCamelCase ): logger.info(F'{name} was ignored' ) continue lowerCamelCase__ = False for key, mapped_key in MAPPING.items(): if "*" in key: lowerCamelCase__ , lowerCamelCase__ = key.split('''.*.''' ) if prefix in name and suffix in name: lowerCamelCase__ = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ): continue lowerCamelCase__ = True if "*" in mapped_key: lowerCamelCase__ = name.split(__UpperCamelCase )[0].split('''.''' )[-2] lowerCamelCase__ = mapped_key.replace('''*''' ,__UpperCamelCase ) if "weight_g" in name: lowerCamelCase__ = '''weight_g''' elif "weight_v" in name: lowerCamelCase__ = '''weight_v''' elif "weight_ih_l0" in name: lowerCamelCase__ = '''weight_ih_l0''' elif "weight_hh_l0" in name: lowerCamelCase__ = '''weight_hh_l0''' elif "bias_ih_l0" in name: lowerCamelCase__ = '''bias_ih_l0''' elif "bias_hh_l0" in name: lowerCamelCase__ = '''bias_hh_l0''' elif "weight_ih_l1" in name: lowerCamelCase__ = '''weight_ih_l1''' elif "weight_hh_l1" in name: lowerCamelCase__ = '''weight_hh_l1''' elif "bias_ih_l1" in name: lowerCamelCase__ = '''bias_ih_l1''' elif "bias_hh_l1" in name: lowerCamelCase__ = '''bias_hh_l1''' elif "bias" in name: lowerCamelCase__ = '''bias''' elif "weight" in name: lowerCamelCase__ = '''weight''' elif "running_mean" in name: lowerCamelCase__ = '''running_mean''' elif "running_var" in 
name: lowerCamelCase__ = '''running_var''' elif "num_batches_tracked" in name: lowerCamelCase__ = '''num_batches_tracked''' else: lowerCamelCase__ = None set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(F'Unused weights: {unused_weights}' ) @torch.no_grad() def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case=None ,__snake_case=None ,) -> int: '''simple docstring''' if config_path is not None: lowerCamelCase__ = EncodecConfig.from_pretrained(__UpperCamelCase ) else: lowerCamelCase__ = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": lowerCamelCase__ = [8, 5, 4, 4] lowerCamelCase__ = [2.2] lowerCamelCase__ = 64 lowerCamelCase__ = 32000 lowerCamelCase__ = 2048 lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False elif model_name == "encodec_48khz": lowerCamelCase__ = [8, 5, 4, 2] lowerCamelCase__ = [3.0, 6.0, 1_2.0, 2_4.0] lowerCamelCase__ = 48000 lowerCamelCase__ = 2 lowerCamelCase__ = False lowerCamelCase__ = '''time_group_norm''' lowerCamelCase__ = True lowerCamelCase__ = 1.0 lowerCamelCase__ = 0.0_1 else: raise ValueError(F'Unknown model name: {model_name}' ) lowerCamelCase__ = EncodecModel(__UpperCamelCase ) lowerCamelCase__ = EncodecFeatureExtractor( feature_size=config.audio_channels ,sampling_rate=config.sampling_rate ,chunk_length_s=config.chunk_length_s ,overlap=config.overlap ,) feature_extractor.save_pretrained(__UpperCamelCase ) lowerCamelCase__ = torch.load(__UpperCamelCase ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights lowerCamelCase__ = original_checkpoint['''best_state'''] recursively_load_weights(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) if 
repo_id: print('''Pushing to the hub...''' ) feature_extractor.push_to_hub(__UpperCamelCase ) model.push_to_hub(__UpperCamelCase ) if __name__ == "__main__": _a = argparse.ArgumentParser() parser.add_argument( "--model", default="encodec_24khz", type=str, help="The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) _a = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
707
from __future__ import annotations def lowerCAmelCase__(__snake_case ,__snake_case = None ,__snake_case = None ) -> None: '''simple docstring''' if start is None: lowerCamelCase__ = 0 if end is None: lowerCamelCase__ = len(__snake_case ) - 1 if start >= end: return lowerCamelCase__ = (start + end) // 2 slowsort(__snake_case ,__snake_case ,__snake_case ) slowsort(__snake_case ,mid + 1 ,__snake_case ) if sequence[end] < sequence[mid]: lowerCamelCase__ , lowerCamelCase__ = sequence[mid], sequence[end] slowsort(__snake_case ,__snake_case ,end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
29
0
class __A : '''simple docstring''' def __init__( self , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = size lowerCamelCase__ = [0] * size lowerCamelCase__ = [0] * size @staticmethod def __lowerCamelCase ( __lowerCAmelCase ): '''simple docstring''' return index | (index + 1) @staticmethod def __lowerCamelCase ( __lowerCAmelCase ): '''simple docstring''' return (index & (index + 1)) - 1 def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = value while index < self.size: lowerCamelCase__ = self.get_prev(_lowerCAmelCase ) + 1 if current_left_border == index: lowerCamelCase__ = value else: lowerCamelCase__ = max(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) lowerCamelCase__ = self.get_next(_lowerCAmelCase ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' right -= 1 # Because of right is exclusive lowerCamelCase__ = 0 while left <= right: lowerCamelCase__ = self.get_prev(_lowerCAmelCase ) if left <= current_left: lowerCamelCase__ = max(_lowerCAmelCase , self.tree[right] ) lowerCamelCase__ = current_left else: lowerCamelCase__ = max(_lowerCAmelCase , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
708
from __future__ import annotations def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> float: '''simple docstring''' if days_between_payments <= 0: raise ValueError('''days_between_payments must be > 0''' ) if daily_interest_rate < 0: raise ValueError('''daily_interest_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * daily_interest_rate * days_between_payments def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,) -> float: '''simple docstring''' if number_of_compounding_periods <= 0: raise ValueError('''number_of_compounding_periods must be > 0''' ) if nominal_annual_interest_rate_percentage < 0: raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,) -> float: '''simple docstring''' if number_of_years <= 0: raise ValueError('''number_of_years must be > 0''' ) if nominal_annual_percentage_rate < 0: raise ValueError('''nominal_annual_percentage_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return compound_interest( __snake_case ,nominal_annual_percentage_rate / 365 ,number_of_years * 365 ) if __name__ == "__main__": import doctest doctest.testmod()
29
0
_a = "Alexander Joslin" import operator as op from .stack import Stack def lowerCAmelCase__(__snake_case ) -> str: '''simple docstring''' lowerCamelCase__ = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub} lowerCamelCase__ = Stack() lowerCamelCase__ = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(snake_case_ ) ) elif i in operators: # RULE 2 operator_stack.push(snake_case_ ) elif i == ")": # RULE 4 lowerCamelCase__ = operator_stack.peek() operator_stack.pop() lowerCamelCase__ = operand_stack.peek() operand_stack.pop() lowerCamelCase__ = operand_stack.peek() operand_stack.pop() lowerCamelCase__ = operators[opr](snake_case_ ,snake_case_ ) operand_stack.push(snake_case_ ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": _a = "(5 + ((4 * 2) * (2 + 3)))" # answer = 45 print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
709
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator: make ``func`` return its wall-clock duration in seconds
    instead of its result (used by the benchmark scripts).

    (The garbled original assigned every intermediate to ``lowerCamelCase__``
    and then returned the undefined names ``delta`` / ``wrapper``.)
    """

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)  # result intentionally discarded; only timing matters
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    """Generate ``num_examples`` random examples matching ``features``.

    :param features: a ``datasets.Features`` mapping of column name -> feature type
    :param seq_shapes: shapes for Sequence columns, keyed by column name
    :return: list of (index, example_dict) pairs
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                # fixed-shape array column: random floats cast to the declared dtype
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # unwrap nested Sequence feature types down to the element type
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write a random dataset of ``num_examples`` rows to ``dataset_path``
    and return it as a ``datasets.Dataset``.

    :raises ValueError: if the writer reports a different number of rows
        than requested.
    """
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
29
0
# NOTE(review): TensorFlow benchmark backend (speed + memory measurement for
# inference and training).  Structure: a decorator factory selecting eager vs.
# XLA-compiled graph execution, a random-input-ids helper, and a Benchmark
# subclass that prepares inference/train closures from a model config and
# measures them with timeit (speed) or memory tracing / nvml / peak-RSS
# (memory).  The identifiers are obfuscation residue: methods declare
# duplicate ``__lowerCAmelCase`` parameters (a SyntaxError) and bodies use
# undefined ``UpperCamelCase__`` names, so this block cannot run as-is; it is
# kept byte-identical because the statement order (strategy checks, TPU
# stabilization runs, nvml init/shutdown, line-by-line tracing) is too
# intricate to restyle safely.  Presumably mirrors transformers'
# benchmark_tf.py — TODO confirm before restoring names.
import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml _a = logging.get_logger(__name__) def lowerCAmelCase__(__snake_case ,__snake_case ) -> Optional[Any]: '''simple docstring''' def run_func(__snake_case ): @wraps(__UpperCamelCase ) def run_in_eager_mode(*__snake_case ,**__snake_case ): return func(*__UpperCamelCase ,**__UpperCamelCase ) @wraps(__UpperCamelCase ) @tf.function(experimental_compile=__UpperCamelCase ) def run_in_graph_mode(*__snake_case ,**__snake_case ): return func(*__UpperCamelCase ,**__UpperCamelCase ) if do_eager_mode is True: if use_xla is not False: raise ValueError( '''Cannot run model in XLA, if `args.eager_mode` is set to `True`. 
Please set `args.eager_mode=False`.''' ) return run_in_eager_mode else: return run_in_graph_mode return run_func def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> ["tf.Tensor"]: '''simple docstring''' lowerCamelCase__ = random.Random() lowerCamelCase__ = [rng.randint(0 ,vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(__UpperCamelCase ,shape=(batch_size, sequence_length) ,dtype=tf.intaa ) class __A ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 lowerCAmelCase_ = """TensorFlow""" @property def __lowerCamelCase ( self ): '''simple docstring''' return tf.__version__ def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) lowerCamelCase__ = self._prepare_inference_func(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return self._measure_speed(_inference ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) lowerCamelCase__ = self._prepare_train_func(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return self._measure_speed(_train ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCamelCase__ ) lowerCamelCase__ = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) lowerCamelCase__ = self._prepare_inference_func(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return self._measure_memory(_inference ) 
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCamelCase__ ) lowerCamelCase__ = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) lowerCamelCase__ = self._prepare_train_func(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return self._measure_memory(_train ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError('''Mixed precision is currently not supported.''' ) lowerCamelCase__ = ( hasattr(UpperCamelCase__ , '''architectures''' ) and isinstance(config.architectures , UpperCamelCase__ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: lowerCamelCase__ = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model lowerCamelCase__ = __import__('''transformers''' , fromlist=[model_class] ) lowerCamelCase__ = getattr(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ = model_cls(UpperCamelCase__ ) except ImportError: raise ImportError( F'{model_class} does not exist. 
If you just want to test the pretrained model, you might want to' ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' ) else: lowerCamelCase__ = TF_MODEL_MAPPING[config.__class__](UpperCamelCase__ ) # encoder-decoder has vocab size saved differently lowerCamelCase__ = config.vocab_size if hasattr(UpperCamelCase__ , '''vocab_size''' ) else config.encoder.vocab_size lowerCamelCase__ = random_input_ids(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ , training=UpperCamelCase__ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(UpperCamelCase__ , training=UpperCamelCase__ ) lowerCamelCase__ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' ) if self.args.fpaa: raise NotImplementedError('''Mixed precision is currently not supported.''' ) lowerCamelCase__ = ( hasattr(UpperCamelCase__ , '''architectures''' ) and isinstance(config.architectures , UpperCamelCase__ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: lowerCamelCase__ = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model lowerCamelCase__ = __import__('''transformers''' , fromlist=[model_class] ) lowerCamelCase__ = getattr(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ = model_cls(UpperCamelCase__ ) except ImportError: raise ImportError( F'{model_class} does not exist. 
If you just want to test the pretrained model, you might want to' ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' ) else: lowerCamelCase__ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](UpperCamelCase__ ) # encoder-decoder has vocab size saved differently lowerCamelCase__ = config.vocab_size if hasattr(UpperCamelCase__ , '''vocab_size''' ) else config.encoder.vocab_size lowerCamelCase__ = random_input_ids(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): lowerCamelCase__ = model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ )[0] lowerCamelCase__ = tf.gradients(UpperCamelCase__ , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): lowerCamelCase__ = model(UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ )[0] lowerCamelCase__ = tf.gradients(UpperCamelCase__ , model.trainable_variables ) return gradients lowerCamelCase__ = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' ) timeit.repeat(UpperCamelCase__ , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average lowerCamelCase__ = timeit.repeat( UpperCamelCase__ , repeat=self.args.repeat , number=1_0 , ) return min(UpperCamelCase__ ) / 1_0.0 except ResourceExhaustedError as e: self.print_fn(F'Doesn\'t fit on GPU. 
{e}' ) def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' logger.info( '''Note that TensorFlow allocates more memory than ''' '''it might need to speed up computation. ''' '''The memory reported here corresponds to the memory ''' '''reported by `nvidia-smi`, which can vary depending ''' '''on total available memory on the GPU that is used.''' ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( '''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory''' ''' consumption line by line.''' ) lowerCamelCase__ = start_memory_tracing('''transformers''' ) if self.args.is_tpu: # tpu raise NotImplementedError( '''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking''' ''' with `args.memory=False`''' ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( '''py3nvml not installed, we won\'t log GPU memory usage. ''' '''Install py3nvml (pip install py3nvml) to log information about GPU.''' ) lowerCamelCase__ = '''N/A''' else: logger.info( '''Measuring total GPU usage on GPU device. 
Make sure to not have additional processes''' ''' running on the same GPU.''' ) # init nvml nvml.nvmlInit() func() lowerCamelCase__ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) lowerCamelCase__ = nvml.nvmlDeviceGetMemoryInfo(UpperCamelCase__ ) lowerCamelCase__ = meminfo.used lowerCamelCase__ = Memory(UpperCamelCase__ ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( '''When enabling line by line tracing, the max peak memory for CPU is inaccurate in''' ''' TensorFlow.''' ) lowerCamelCase__ = None else: lowerCamelCase__ = measure_peak_memory_cpu(UpperCamelCase__ ) lowerCamelCase__ = Memory(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else memory_bytes if self.args.trace_memory_line_by_line: lowerCamelCase__ = stop_memory_tracing(UpperCamelCase__ ) if memory is None: lowerCamelCase__ = summary.total else: lowerCamelCase__ = None return memory, summary except ResourceExhaustedError as e: self.print_fn(F'Doesn\'t fit on GPU. {e}' ) return "N/A", None
710
def lowerCAmelCase__(__snake_case ) -> int: '''simple docstring''' if not grid or not grid[0]: raise TypeError('''The grid does not contain the appropriate information''' ) for cell_n in range(1 ,len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] lowerCamelCase__ = grid[0] for row_n in range(1 ,len(__snake_case ) ): lowerCamelCase__ = grid[row_n] lowerCamelCase__ = fill_row(__snake_case ,__snake_case ) lowerCamelCase__ = grid[row_n] return grid[-1][-1] def lowerCAmelCase__(__snake_case ,__snake_case ) -> list: '''simple docstring''' current_row[0] += row_above[0] for cell_n in range(1 ,len(__snake_case ) ): current_row[cell_n] += min(current_row[cell_n - 1] ,row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
29
0
'''simple docstring''' import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor _a = logging.get_logger(__name__) class __A ( lowerCAmelCase__ ): '''simple docstring''' def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ): '''simple docstring''' warnings.warn( '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use YolosImageProcessor instead.''' , _lowerCamelCase , ) super().__init__(*_lowerCamelCase , **_lowerCamelCase )
711
# NOTE(review): deprecated GLUE torch Dataset: dataclass of training args
# (task name, data dir, max_seq_length, overwrite_cache), a train/dev/test
# Split enum, and a Dataset that tokenizes GLUE examples and caches the
# features to disk under a FileLock (with the RoBERTa/BART MNLI label-swap
# hack).  Identifiers are obfuscation residue — methods declare duplicate
# ``__lowerCAmelCase`` parameters (a SyntaxError) and every field is named
# ``lowerCAmelCase_`` — so the block cannot run as-is; the lock/cache/label
# ordering is too delicate to restyle safely, so the code is kept
# byte-identical.  Presumably mirrors transformers' datasets/glue.py —
# TODO confirm before restoring names.
import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures _a = logging.get_logger(__name__) @dataclass class __A : '''simple docstring''' lowerCAmelCase_ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} ) lowerCAmelCase_ = field( metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} ) lowerCAmelCase_ = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.task_name.lower() class __A ( lowerCAmelCase ): '''simple docstring''' lowerCAmelCase_ = """train""" lowerCAmelCase_ = """dev""" lowerCAmelCase_ = """test""" class __A ( lowerCAmelCase ): '''simple docstring''' lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = Split.train , __lowerCAmelCase = None , ): '''simple docstring''' warnings.warn( '''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets ''' '''library. 
You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , __lowerCAmelCase , ) lowerCamelCase__ = args lowerCamelCase__ = glue_processors[args.task_name]() lowerCamelCase__ = glue_output_modes[args.task_name] if isinstance(__lowerCAmelCase , __lowerCAmelCase ): try: lowerCamelCase__ = Split[mode] except KeyError: raise KeyError('''mode is not a valid split name''' ) # Load data features from cache or dataset file lowerCamelCase__ = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , ) lowerCamelCase__ = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) lowerCamelCase__ , lowerCamelCase__ = label_list[2], label_list[1] lowerCamelCase__ = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
lowerCamelCase__ = cached_features_file + '''.lock''' with FileLock(__lowerCAmelCase ): if os.path.exists(__lowerCAmelCase ) and not args.overwrite_cache: lowerCamelCase__ = time.time() lowerCamelCase__ = torch.load(__lowerCAmelCase ) logger.info( F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start ) else: logger.info(F'Creating features from dataset file at {args.data_dir}' ) if mode == Split.dev: lowerCamelCase__ = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: lowerCamelCase__ = self.processor.get_test_examples(args.data_dir ) else: lowerCamelCase__ = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: lowerCamelCase__ = examples[:limit_length] lowerCamelCase__ = glue_convert_examples_to_features( __lowerCAmelCase , __lowerCAmelCase , max_length=args.max_seq_length , label_list=__lowerCAmelCase , output_mode=self.output_mode , ) lowerCamelCase__ = time.time() torch.save(self.features , __lowerCAmelCase ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self ): '''simple docstring''' return len(self.features ) def __getitem__( self , __lowerCAmelCase ): '''simple docstring''' return self.features[i] def __lowerCamelCase ( self ): '''simple docstring''' return self.label_list
29
0
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    """A node covering the inclusive index interval [start, end]."""

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val  # fn-aggregate of the covered values
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    """Pointer-based segment tree over ``collection`` aggregated with a binary
    ``function`` (e.g. ``operator.add``, ``max``, ``min``).

    (Fixes the garbled original: both classes were named ``__A`` while the
    code referenced ``SegmentTreeNode`` by name, and methods declared
    duplicate ``__lowerCAmelCase`` parameters — a SyntaxError.  Public method
    names ``update``/``query_range``/``traverse`` are taken from the original
    ``__main__`` demo, which calls them.)
    """

    def __init__(self, collection: Sequence, function) -> None:
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set ``collection[i] = val`` and re-aggregate ancestors."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return the aggregate of values over the inclusive range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        # leaf: a single element
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        # re-aggregate this node from its (possibly updated) children
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range entirely in the left child
                return self._query_range(node.left, i, j)
            # range straddles both children
            return self.fn(
                self._query_range(node.left, i, node.mid),
                self._query_range(node.right, node.mid + 1, j),
            )
        # range entirely in the right child
        return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield the nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
712
import coval  # From: git+https://github.com/ns-moosavi/coval.git  # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"

_DESCRIPTION = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn	Type	Description\n1	Document ID	This is a variation on the document filename\n2	Part number	Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3	Word number\n4	Word itself	This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5	Part-of-Speech\n6	Parse bit	This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7	Predicate lemma	The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8	Predicate Frameset ID	This is the PropBank frameset ID of the predicate in Column 7.\n9	Word sense	This is the word sense of the word in Column 3.\n10	Speaker/Author	This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11	Named Entities	These columns identifies the spans representing various named entities.\n12:N	Predicate Arguments	There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN	Coreference	Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"

_KWARGS_DESCRIPTION = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005   0   0       Thank   VBP  (TOP(S(VP*    thank  01   1    Xu_li  *           (V*)        *       -',\n ... 'bc/cctv/00/cctv_0005   0   1         you   PRP        (NP*)      -    -   -    Xu_li  *        (ARG1*)   (ARG0*)   (116)',\n ... 'bc/cctv/00/cctv_0005   0   2    everyone    NN        (NP*)      -    -   -    Xu_li  *    (ARGM-DIS*)        *    (116)',\n ... 'bc/cctv/00/cctv_0005   0   3         for    IN        (PP*       -    -   -    Xu_li  *        (ARG2*         *       -',\n ... 'bc/cctv/00/cctv_0005   0   4    watching   VBG   (S(VP*))))   watch  01   1    Xu_li  *             *)      (V*)      -',\n ... 'bc/cctv/00/cctv_0005   0   5           .     .          *))      -    -   -    Xu_li  *             *         *       -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"


def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    """Extract cluster/mention-assignment info for one document's key and system lines.

    Returns a dict mapping `doc` to the tuple
    (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    expected by `coval.eval.evaluator.evaluate_documents`.
    """
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        # NOTE: the *key* document's parse trees are used for the system side too,
        # matching the upstream CoVal wrapper.
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    # Fix: store the per-document tuple in the returned mapping (the previous
    # version dropped it into a throwaway local, returning an empty dict).
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos


def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Run each (name, metric) pair over the document infos and collect R/P/F1 scores.

    Adds 'conll_score' (average F1 of MUC, B-cubed and CEAFe, scaled to 0-100)
    when all three of those metrics are present.
    """
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, fa = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += fa
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {fa * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores


def check_gold_parse_annotation(key_lines):
    """Return True if the key lines carry a gold parse bit in column 6 (index 5)."""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    """CoVal coreference metric: MUC, B-cubed, CEAFe, LEA and the averaged CoNLL score."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
29
0
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    """Return the top-k fillings of the single `<mask>` token in `masked_input`.

    Each element of the returned list is a tuple
    (filled_sentence, probability, predicted_token).
    """
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        # SentencePiece marks word boundaries with U+2581; map it back to a space.
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


if __name__ == "__main__":
    # Guarded so that importing this module does not trigger a model download.
    tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
    model = CamembertForMaskedLM.from_pretrained("camembert-base")
    model.eval()

    masked_input = "Le camembert est <mask> :)"
    print(fill_mask(masked_input, model, tokenizer, topk=3))
713
# This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests _a = open # noqa: we just need to have a builtin inside this module to test it properly
29
0
from collections import defaultdict
from pathlib import Path

import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge


# System summaries (predictions) and reference summaries for three CNN/DM articles.
PRED = [
    "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
    " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
    " depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.",
    "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
    " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s"
    " founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the"
    " body.",
    "Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
    " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
    " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
    " punishment.",
]

TGT = [
    "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
    " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
    " had informed his Lufthansa training school of an episode of severe depression, airline says .",
    "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
    " Israel and the United States opposed the move, which could open the door to war crimes investigations against"
    " Israelis .",
    "Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
    " death . Organization claims that governments around the world are using the threat of terrorism to advance"
    " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
    " sentences up by 28% .",
]


def test_disaggregated_scores_are_determinstic():
    """Per-sentence (non-aggregated) scores must be stable across key subsets."""
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_ra = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    """Separating sentences with newlines should improve rougeLsum on CNN/DM-style data."""
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_to_r1r2():
    """newline_sep must not affect rouge1/rouge2/rougeL."""
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    """Single-sentence inputs have no newlines, so newline_sep must be a no-op."""
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    """Pegasus-style '<n>' output scores higher once newline separation is applied."""
    pred = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says ."
        "<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    tgt = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
        " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas"
        " Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    """End-to-end check of the file-path entry point, with and without aggregation."""
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
714
import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging


logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    # Name of the joblib backend selected via `parallel_backend(...)`;
    # None means "use the local multiprocessing pool".
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply `single_map_nested_func` over contiguous slices of `iterable`, either with a
    local multiprocessing pool or, when a backend is configured, through joblib."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Split `iterable` into at most `num_proc` contiguous slices and map them in a Pool."""
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        # The first `mod` slices get one extra element.
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        # Share tqdm's write lock with the workers so progress bars don't interleave.
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map through joblib using the backend registered in ParallelBackendConfig.

    tqdm is not supported here, so each item is submitted with progress disabled.
    """
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Context manager that routes `parallel_map` through the given joblib backend."""
    ParallelBackendConfig.backend_name = backend_name

    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called

    try:
        yield
    finally:
        # Always restore the default backend, even if the body raised.
        ParallelBackendConfig.backend_name = None
29
0
from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    flip_channel_order,
    get_resize_output_image_size,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging


if is_vision_available():
    import PIL

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    r"""
    Image processor for MobileViT: shortest-edge resize, center crop, rescale, and an
    RGB->BGR channel flip (the pretrained checkpoints expect BGR input).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Defaults match the released MobileViT checkpoints: resize shortest edge
        # to 224, then center-crop to 256x256.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals size["shortest_edge"], keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        """Flip RGB <-> BGR channel order (delegates to the module-level transform)."""
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one image or a batch; per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Turn model logits into per-image segmentation maps, optionally resized to `target_sizes`."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of `TransformerTemporalModel.forward`.

    Attributes:
        sample: hidden states after temporal attention, with the same
            ``(batch_frames, channel, height, width)`` layout as the input.
    """

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """A Transformer that attends across the *frame* axis of a video latent.

    The input is a batch of per-frame feature maps; each spatial location
    attends over its counterparts in the other frames of the same clip.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        # 1. Input projection: group-norm over channels, then channel -> inner_dim.
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 2. Define transformer blocks (self-attention over the frame axis).
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for _ in range(num_layers)
            ]
        )

        # 3. Output projection back to the channel dimension.
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Apply temporal attention to per-frame feature maps.

        Args:
            hidden_states: tensor of shape ``(batch * num_frames, channel, height, width)``.
            encoder_hidden_states: optional conditioning for cross-attention.
            timestep: optional diffusion timestep forwarded to the blocks.
            class_labels: optional class conditioning forwarded to the blocks.
            num_frames: number of frames packed into the batch dimension.
            cross_attention_kwargs: extra kwargs for the attention processors.
            return_dict: when False, return a plain ``(sample,)`` tuple.

        Returns:
            `TransformerTemporalModelOutput` (or a tuple) with the residual-added result.
        """
        # 1. Input: regroup frames, move spatial positions into the batch so each
        # position attends over the num_frames axis.
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output: undo the reshapes and add the residual connection.
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
29
0
from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig _a : Dict = { "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json", "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json", } class __A ( UpperCamelCase_ ): '''simple docstring''' lowerCAmelCase_ = """ernie_m""" lowerCAmelCase_ = {"""dropout""": """classifier_dropout""", """num_classes""": """num_labels"""} def __init__( self , __lowerCAmelCase = 2_5_0_0_0_2 , __lowerCAmelCase = 7_6_8 , __lowerCAmelCase = 1_2 , __lowerCAmelCase = 1_2 , __lowerCAmelCase = 3_0_7_2 , __lowerCAmelCase = "gelu" , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 5_1_4 , __lowerCAmelCase = 0.02 , __lowerCAmelCase = 1 , __lowerCAmelCase = 1E-05 , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=0.0 , **__lowerCAmelCase , ): '''simple docstring''' super().__init__(pad_token_id=__a , **__a ) lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = initializer_range lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = classifier_dropout lowerCamelCase__ = is_decoder lowerCamelCase__ = act_dropout
716
# Notebook-generation config (Korean docs): the install cell prepended to every
# generated notebook, plus patterns the doc formatter must not reformat.
INSTALL_CONTENT = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

# First cells injected into generated notebooks.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Placeholder tokens that `black` must leave untouched when formatting doc examples.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
29
0
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __A ( lowerCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = """ssube/stable-diffusion-x4-upscaler-onnx""" def __lowerCamelCase ( self , __lowerCAmelCase=0 ): '''simple docstring''' lowerCamelCase__ = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(__A ) ) lowerCamelCase__ = torch.manual_seed(__A ) lowerCamelCase__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=__A ) lowerCamelCase__ = self.get_dummy_inputs() lowerCamelCase__ = pipe(**__A ).images lowerCamelCase__ = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCamelCase__ = np.array( [0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCamelCase__ = PNDMScheduler.from_config(pipe.scheduler.config , 
skip_prk_steps=__A ) pipe.set_progress_bar_config(disable=__A ) lowerCamelCase__ = self.get_dummy_inputs() lowerCamelCase__ = pipe(**__A ).images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCamelCase__ = np.array( [0.689_8892, 0.5924_0556, 0.5249_9527, 0.5886_6215, 0.5225_8235, 0.5257_2715, 0.6241_4473, 0.617_4387, 0.621_4964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCamelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCamelCase__ = self.get_dummy_inputs() lowerCamelCase__ = pipe(**__A ).images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCamelCase__ = np.array( [0.765_9278, 0.7643_7664, 0.7557_9107, 0.769_1116, 0.7766_6986, 0.772_7672, 0.775_8664, 0.781_2226, 0.7694_2515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCamelCase__ = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCamelCase__ = self.get_dummy_inputs() lowerCamelCase__ = pipe(**__A ).images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCamelCase__ = np.array( [0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , 
provider='''CPUExecutionProvider''' ) lowerCamelCase__ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCamelCase__ = self.get_dummy_inputs() lowerCamelCase__ = pipe(**__A ).images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCamelCase__ = np.array( [0.7742_4496, 0.77_3601, 0.764_5288, 0.776_9598, 0.777_2739, 0.773_8688, 0.7818_7233, 0.7787_9584, 0.76_7043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class __A ( unittest.TestCase ): '''simple docstring''' @property def __lowerCamelCase ( self ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ort.SessionOptions() lowerCamelCase__ = False return options def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) lowerCamelCase__ = init_image.resize((1_2_8, 1_2_8) ) # using the PNDM scheduler by default lowerCamelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCamelCase__ = '''A fantasy landscape, trending on artstation''' lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__A , output_type='''np''' , ) lowerCamelCase__ = output.images lowerCamelCase__ = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) lowerCamelCase__ = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 
0.4972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) lowerCamelCase__ = init_image.resize((1_2_8, 1_2_8) ) lowerCamelCase__ = LMSDiscreteScheduler.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' ) lowerCamelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCamelCase__ = '''A fantasy landscape, trending on artstation''' lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=2_0 , generator=__A , output_type='''np''' , ) lowerCamelCase__ = output.images lowerCamelCase__ = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) lowerCamelCase__ = np.array( [0.5017_3753, 0.5022_3356, 0.50_2039, 0.5023_3036, 0.502_3725, 0.502_2601, 0.501_8758, 0.5023_4085, 0.5024_1566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
717
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available

# Base import structure; backend-specific entries are appended below only when
# the corresponding framework is installed.
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module below is used.
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
29
0
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices ``[i, j]`` (i < j) with ``nums[i] + nums[j] == target``.

    Assumes `nums` is sorted in non-decreasing order (the two-pointer sweep
    relies on it). Returns an empty list when no such pair exists.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    >>> two_pointer([1, 2, 3], 10)
    []
    """
    i = 0
    j = len(nums) - 1

    while i < j:
        pair_sum = nums[i] + nums[j]
        if pair_sum == target:
            return [i, j]
        if pair_sum < target:
            i += 1  # need a larger sum: advance the low pointer
        else:
            j -= 1  # need a smaller sum: retreat the high pointer

    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
718
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor

logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    """Deprecated alias for `OwlViTImageProcessor`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Warn with FutureWarning so callers migrate before v5 removes this class.
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
29
0
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _a = get_tests_dir("fixtures") _a = get_tests_dir("fixtures/dummy_feature_extractor_config.json") _a = get_tests_dir("fixtures/dummy-config.json") class __A ( unittest.TestCase ): '''simple docstring''' def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = 0 def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(A__ , A__ ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(A__ ) self.assertIsInstance(A__ , A__ ) def __lowerCamelCase ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase__ = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(A__ ).to_dict() config_dict.pop('''feature_extractor_type''' ) lowerCamelCase__ = WavaVecaFeatureExtractor(**A__ ) # save in new folder model_config.save_pretrained(A__ ) config.save_pretrained(A__ ) lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(A__ ) # make sure private variable is not incorrectly saved lowerCamelCase__ = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) self.assertIsInstance(A__ , A__ ) def __lowerCamelCase ( self ): '''simple docstring''' 
lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(A__ ) self.assertIsInstance(A__ , A__ ) def __lowerCamelCase ( self ): '''simple docstring''' with self.assertRaisesRegex( A__ , '''bert-base is not a local folder and is not a valid model identifier''' ): lowerCamelCase__ = AutoFeatureExtractor.from_pretrained('''bert-base''' ) def __lowerCamelCase ( self ): '''simple docstring''' with self.assertRaisesRegex( A__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(A__ , revision='''aaaaaa''' ) def __lowerCamelCase ( self ): '''simple docstring''' with self.assertRaisesRegex( A__ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): lowerCamelCase__ = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' ) def __lowerCamelCase ( self ): '''simple docstring''' with self.assertRaises(A__ ): lowerCamelCase__ = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(A__ ): lowerCamelCase__ = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=A__ ) lowerCamelCase__ = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=A__ ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) # Test feature extractor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(A__ ) lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(A__ , trust_remote_code=A__ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) def __lowerCamelCase ( self ): '''simple docstring''' try: AutoConfig.register('''custom''' , A__ ) AutoFeatureExtractor.register(A__ , A__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(A__ ): AutoFeatureExtractor.register(A__ , A__ ) # Now that the config is registered, it can be used as any other config with the auto-API lowerCamelCase__ = CustomFeatureExtractor.from_pretrained(A__ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(A__ ) lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(A__ ) self.assertIsInstance(A__ , A__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def __lowerCamelCase ( self ): '''simple docstring''' class __A ( lowerCAmelCase ): '''simple docstring''' lowerCAmelCase_ = True try: AutoConfig.register('''custom''' , A__ ) AutoFeatureExtractor.register(A__ , A__ ) # If remote code is not set, the default is to use local lowerCamelCase__ = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. 
lowerCamelCase__ = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=A__ ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub lowerCamelCase__ = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=A__ ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(not hasattr(A__ , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
719
# Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Any: '''simple docstring''' lowerCamelCase__ = { '''en''': '''Machine learning is great, isn\'t it?''', '''ru''': '''Машинное обучение - это здорово, не так ли?''', '''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''', } # BLUE scores as follows: # "pair": [fairseq, transformers] lowerCamelCase__ = { '''wmt16-en-de-dist-12-1''': [2_8.3, 2_7.5_2], '''wmt16-en-de-dist-6-1''': [2_7.4, 2_7.1_1], '''wmt16-en-de-12-1''': [2_6.9, 2_5.7_5], } lowerCamelCase__ = F'{src_lang}-{tgt_lang}' lowerCamelCase__ = F'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training 
data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. 
Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n' model_card_dir.mkdir(parents=__snake_case ,exist_ok=__snake_case ) lowerCamelCase__ = os.path.join(__snake_case ,'''README.md''' ) print(F'Generating {path}' ) with open(__snake_case ,'''w''' ,encoding='''utf-8''' ) as f: f.write(__snake_case ) # make sure we are under the root of the project _a = Path(__file__).resolve().parent.parent.parent _a = repo_dir / "model_cards" for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: _a = model_cards_dir / "allenai" / model_name write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
29
0
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the one-shot deprecation-warning registry so each test re-emits warnings."""
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    """Replace the huggingface_hub client used by `list_metrics` with a local stub."""

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Every metric entry point must emit the evaluate-migration deprecation warning."""
    if "tmp_path" in args:
        # Substitute the placeholder string with the real per-test temp directory.
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
720
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor

logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    """Deprecated alias for `SegformerImageProcessor`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Warn with FutureWarning so callers migrate before v5 removes this class.
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
29
0
from ..utils import DummyObject, requires_backends


# NOTE(review): the original class name was mangled; LMSDiscreteScheduler is the
# object guarded by the torch+scipy backend pair in diffusers — confirm against
# the package's utils/dummy_torch_and_scipy_objects.py.
class LMSDiscreteScheduler(metaclass=DummyObject):
    """Placeholder that raises a helpful error when torch/scipy are not installed."""

    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
721
from queue import PriorityQueue from typing import Any import numpy as np def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) -> float | int: '''simple docstring''' for nxt, d in graph[v]: if nxt in visited_forward: continue lowerCamelCase__ = cst_fwd.get(__snake_case ,np.inf ) lowerCamelCase__ = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) lowerCamelCase__ = new_cost_f lowerCamelCase__ = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: lowerCamelCase__ = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> int: '''simple docstring''' lowerCamelCase__ = -1 lowerCamelCase__ = set() lowerCamelCase__ = set() lowerCamelCase__ = {source: 0} lowerCamelCase__ = {destination: 0} lowerCamelCase__ = {source: None} lowerCamelCase__ = {destination: None} lowerCamelCase__ = PriorityQueue() lowerCamelCase__ = PriorityQueue() lowerCamelCase__ = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): lowerCamelCase__ , lowerCamelCase__ = queue_forward.get() visited_forward.add(__snake_case ) lowerCamelCase__ , lowerCamelCase__ = queue_backward.get() visited_backward.add(__snake_case ) lowerCamelCase__ = pass_and_relaxation( __snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) lowerCamelCase__ = pass_and_relaxation( __snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: lowerCamelCase__ = shortest_distance return shortest_path_distance _a = { "B": [["C", 1]], "C": [["D", 1]], "D": [["F", 
1]], "E": [["B", 1], ["G", 2]], "F": [], "G": [["F", 1]], } _a = { "B": [["E", 1]], "C": [["B", 1]], "D": [["C", 1]], "F": [["D", 1], ["G", 1]], "E": [[None, np.inf]], "G": [["E", 2]], } if __name__ == "__main__": import doctest doctest.testmod()
29
0
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with `accuracy` terms of the Maclaurin series.

    `theta` is first reduced modulo 2*pi so the series converges quickly.

    Raises:
        ValueError: if `theta` is not numeric or `accuracy` is not a positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    # Range reduction: pull theta into a window near [0, 2*pi).
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with `accuracy` terms of the Maclaurin series.

    `theta` is first reduced modulo 2*pi so the series converges quickly.

    Raises:
        ValueError: if `theta` is not numeric or `accuracy` is not a positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
700
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class __A(ProcessorMixin):
    """CLAP processor wrapping a Clap feature extractor and a Roberta tokenizer.

    NOTE(review): the class name `__A` follows the surrounding file's
    convention; the corresponding upstream name is ClapProcessor.
    """

    # ProcessorMixin resolves these class names at construction time; the
    # collapsed original bound both values to a single attribute name.
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or extract features from ``audios``.

        Returns a BatchEncoding holding the text fields, the audio
        ``input_features``, or both merged when both modalities are given.

        Raises:
            ValueError: if neither ``text`` nor ``audios`` is provided.
        """
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            # Merge the audio features into the text encoding.  The collapsed
            # original assigned them to a throwaway local and lost them.
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            # Audio-only path: the original referenced an undefined local here.
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and feature-extractor input names, order-preserving and de-duplicated."""
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
29
0
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Holds the image-processor configuration and builds random image fixtures.

    Named explicitly: the collapsed original defined this class as `__A`
    while the test classes below instantiate `ChineseCLIPImageProcessingTester`.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs used to build the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random images as PIL images, numpy arrays or torch tensors."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Exercises the standard three-channel (RGB) path of ChineseCLIPImageProcessor."""

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Exercises the four-channel (RGBA) input path, which converts down to RGB."""

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        # RGBA inputs are converted to RGB, so three channels come out.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
701
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class __A : '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , __lowerCAmelCase=0 , ): '''simple docstring''' lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = seq_length lowerCamelCase__ = is_training lowerCamelCase__ = use_input_mask lowerCamelCase__ = use_token_type_ids lowerCamelCase__ = use_labels lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = type_vocab_size lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = num_labels lowerCamelCase__ = 
num_choices lowerCamelCase__ = scope lowerCamelCase__ = projection_dim def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ = None if self.use_input_mask: # follow test_modeling_tf_ctrl.py lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ = None if self.use_token_type_ids: lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) lowerCamelCase__ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFDPRContextEncoder(config=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase 
) lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFDPRQuestionEncoder(config=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFDPRReader(config=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = config_and_inputs lowerCamelCase__ = {'''input_ids''': input_ids} return config, inputs_dict @require_tf class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) lowerCAmelCase_ = {"""feature-extraction""": TFDPRQuestionEncoder} if 
is_tf_available() else {} lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFDPRModelTester(self ) lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*__lowerCAmelCase ) @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRQuestionEncoder.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRReader.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class __A ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = 
TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' ) lowerCamelCase__ = tf.constant( [[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP] lowerCamelCase__ = model(__lowerCAmelCase )[0] # embedding shape = (1, 768) # compare the actual values for a slice. lowerCamelCase__ = tf.constant( [ [ 0.0323_6253, 0.1275_3335, 0.1681_8509, 0.0027_9786, 0.389_6933, 0.2426_4945, 0.217_8971, -0.0233_5227, -0.0848_1959, -0.1432_4117, ] ] ) self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
29
0
"""Fine-tune OpenAIGPTDoubleHeadsModel on the ROCStories cloze task."""
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
# Restored name: the body logs via `logger` (the collapsed original bound it
# to `_a`, leaving `logger` undefined).
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    """Return the number of argmax-over-axis-1 predictions matching labels."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Load a ROCStories csv and return [(story, cont1, cont2, label), ...]."""
    with open(dataset_path, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first line
        for line in tqdm(reader):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Turn (story, cont1, cont2, label) tuples into double-heads model tensors.

    For each sample, builds the two candidate sequences
    ``start_token story delimiter_token continuation clf_token`` and returns,
    per dataset, a tuple (input_ids, mc_token_ids, lm_labels, mc_labels).
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        # -100 is ignored by the LM loss.
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            # mc_token_ids points at the classification token (last position).
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


def main():
    """Parse CLI args, then optionally train and/or evaluate the model."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        # Recursively tokenize strings inside nested tuples/lists; pass ints
        # (labels) through unchanged.
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info(" %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
702
"""Pure-Python TF-IDF helpers: term frequency, document frequency, IDF, TF-IDF."""
import string
from math import log10  # fixed: original imported the nonexistent `math.logaa`


def term_frequency(term: str, document: str) -> int:
    """Return how many times `term` occurs in `document` (case-insensitive).

    Punctuation is stripped before tokenizing on single spaces.
    NOTE(review): newlines are deleted rather than replaced with a space, so two
    words separated only by a line break merge into one token — preserved as-is.
    """
    clean_document = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokens = clean_document.split(" ")  # word tokenization
    return len([word for word in tokens if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (documents in `corpus` containing `term`, total documents).

    Documents are newline-separated. Matching is case-insensitive and
    substring-based (punctuation is stripped first).
    """
    clean_corpus = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = clean_corpus.split("\n")
    lowered_term = term.lower()
    return (len([doc for doc in docs if lowered_term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return log10(n / df) rounded to 3 places; with smoothing, 1 + log10(n / (1 + df)).

    Raises:
        ValueError: if ``n == 0`` (log10(0) is undefined).
        ZeroDivisionError: if ``df == 0`` and smoothing is off.
    """
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    if n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: float, idf: float) -> float:
    """Return the TF-IDF product rounded to 3 decimal places."""
    return round(tf * idf, 3)


# Backward compatibility: the mangled module bound all four functions to this
# single name, so it previously resolved to the last definition (the product).
lowerCAmelCase__ = tf_idf
29
0
"""Tests for the slow/fast MBart tokenizers (unit fixture + en-ro integration)."""
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

# Fairseq language-code ids used throughout the integration tests.
EN_CODE = 250004
RO_CODE = 250020


@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests against the local SentencePiece fixture."""

    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        """Tokenize, convert to ids and back, checking unknown-token handling."""
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #                                   ^ unk: 2 + 1 = 3                              unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        """Round-trip save/load for both legacy and tokenizer.json formats."""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)


@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    """Integration tests against the pretrained facebook/mbart-large-en-ro checkpoint."""

    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBartTokenizer.from_pretrained(cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO")
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
703
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps each submodule to the public names it exports; consumed by _LazyModule
# below so heavy backends are only imported on first attribute access.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
29
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
    "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
    "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
    "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
    "funnel-transformer/intermediate": (
        "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
    ),
    "funnel-transformer/intermediate-base": (
        "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
    ),
    "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
    "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
    "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
    "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}


class FunnelConfig(PretrainedConfig):
    """Configuration for Funnel Transformer models.

    Stores block layout (`block_sizes` / `block_repeats`), attention geometry
    (`d_model`, `n_head`, `d_head`, `d_inner`) and pooling/attention variants.
    `num_hidden_layers` and `num_blocks` are derived, read-only properties.
    """

    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],  # noqa: B006 — kept for interface compat; never mutated here
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        # Default: run every block once.
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        # Derived from the block layout; not independently settable.
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
704
import os
from collections import namedtuple

import pytest

from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict

# Lightweight stand-in for the argparse namespace that TestCommand expects;
# only `dataset` is required, the remaining fields default to disabled.
_TestCommandArgs = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
    ],
    defaults=[None, None, None, False, False, False, False, False],
)


def is_apercent_close(source, target):
    """Return True when `source` is within 1% (relative) of `target`."""
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_dir):
    """Run `datasets-cli test --save_infos` and check the generated README infos."""
    args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    readme_path = os.path.join(dataset_dir, "README.md")
    assert os.path.exists(readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            # byte counts may drift slightly between runs; allow 1% tolerance
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            # fixed: original had a bare `result == expected` whose value was discarded
            assert result == expected
29
0
"""Tests for the parquet reader/writer IO paths of `datasets`."""
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    """Shared assertions: the fixture parquet file holds 4 rows x 3 typed columns."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    # NOTE(review): conditional-expression precedence means this reads as
    # `dataset.split == split` when split is truthy, else `"train"` — kept as-is.
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for the DatasetDict reader variants."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
705
from __future__ import annotations

import unittest

from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers.models.esm.modeling_tf_esm import (
        TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFEsmForMaskedLM,
        TFEsmForSequenceClassification,
        TFEsmForTokenClassification,
        TFEsmModel,
    )


class TFEsmModelTester:
    """Builds small random ESM configs/inputs and runs shape checks for each head."""

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return a tiny EsmConfig plus random ids/masks/labels for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        """Same as prepare_config_and_inputs, but with decoder mode and encoder tensors."""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        # Also exercise list-input and bare-tensor calling conventions.
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None


@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
29
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    """
    Configuration class for the LXMERT model.

    Holds the sizes of the language, cross-modality and vision transformer
    stacks (``l_layers``/``x_layers``/``r_layers``), the visual-feature
    geometry, and flags selecting which pre-training losses/tasks are active.
    All extra keyword arguments are forwarded to ``PretrainedConfig``.
    """

    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # LXMERT has three transformer stacks; expose them under the common
        # `num_hidden_layers` name as a per-stack mapping.
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        # Fix: the original called super().__init__(**a_) with an undefined name.
        super().__init__(**kwargs)
706
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True iff *number* is prime (trial division up to sqrt(number))."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    status = True
    # 0 and 1 are not prime.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # A divisor found means 'number' is composite.
        if number % divisor == 0:
            status = False
            break

    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int) -> list:
    """Return all primes from 2 up to *n* using the sieve of Eratosthenes."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    begin_list = list(range(2, n + 1))
    # Cross out every multiple of each surviving number by zeroing it.
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    ans = [x for x in begin_list if x != 0]

    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int) -> list:
    """Return all primes from 2 up to *n* using repeated primality tests."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int) -> list:
    """Return the prime factorization of *number* as a list of int factors.

    Fix: uses integer division (//=) so the quotient stays an exact int;
    the original float division lost exactness for large inputs and made
    intermediate values floats.
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []
    factor = 2
    quotient = number

    if number in (0, 1):
        ans.append(number)
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and quotient % factor == 0:
                ans.append(factor)
                quotient //= factor  # integer division keeps 'quotient' an int
            else:
                factor += 1
    else:
        ans.append(number)

    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of *number*."""
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"

    ans = max(prime_factorization(number))

    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of *number*."""
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"

    ans = min(prime_factorization(number))

    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    """Return True iff *number* is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True iff *number* is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0


def goldbach(number: int) -> list:
    """Return two primes whose sum is the even *number* (Goldbach partition)."""
    assert isinstance(number, int) and (number > 2) and is_even(number), "'number' must been an int, even and > 2"

    ans = []
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    i = 0
    j = None
    loop = True  # becomes False as soon as a pair is found

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1: int, number2: int) -> int:
    """Return the greatest common divisor of the two numbers (Euclid)."""
    assert (
        isinstance(number1, int) and isinstance(number2, int) and (number1 >= 0) and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    assert isinstance(number1, int) and (number1 >= 0), "'number' must been from type int and positive"
    return number1


def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple ("kleinstes gemeinsames Vielfaches")."""
    assert (
        isinstance(number1, int) and isinstance(number2, int) and (number1 >= 1) and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1

    if number1 > 1 and number2 > 1:
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0
    done = []  # primes already merged into 'ans'

    # Merge: each shared prime contributes its maximum multiplicity.
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # Primes only in the second factorization.
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    assert isinstance(ans, int) and (ans >= 0), "'ans' must been from type int and positive"
    return ans


def get_prime(n: int) -> int:
    """Return the n-th prime, counting from index 0 (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2
    while index < n:
        index += 1
        ans += 1
        # advance to the next prime
        while not is_prime(ans):
            ans += 1

    assert isinstance(ans, int) and is_prime(ans), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return primes strictly between the primes *p_number_1* and *p_number_2*."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump past the lower bound

    ans = []

    # find the first prime above p_number_1
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)
        number += 1
        # find the next prime
        while not is_prime(number):
            number += 1

    assert (
        isinstance(ans, list) and ans[0] != p_number_1 and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    return ans


def get_divisors(n: int) -> list:
    """Return all positive divisors of *n* (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    """Return True iff *number* equals the sum of its proper divisors."""
    assert isinstance(number, int) and (number > 1), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    assert (
        isinstance(divisors, list) and (divisors[0] == 1) and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # proper divisors are all but the last entry (the number itself)
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Return (numerator, denominator) reduced to lowest terms."""
    assert (
        isinstance(numerator, int) and isinstance(denominator, int) and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Return n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1
    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Return the n-th Fibonacci value of this sequence (fib(0) == fib(1) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
29
0
import json
import os
from typing import Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

# Mapping from CTRL control-code names to their vocabulary ids.
CONTROL_CODES = {
    "Pregnancy": 168629,
    "Christianity": 7675,
    "Explain": 106423,
    "Fitness": 63440,
    "Saving": 63163,
    "Ask": 27171,
    "Ass": 95985,
    "Joke": 163509,
    "Questions": 45622,
    "Thoughts": 49605,
    "Retail": 52342,
    "Feminism": 164338,
    "Writing": 11992,
    "Atheism": 192263,
    "Netflix": 48616,
    "Computing": 39639,
    "Opinion": 43213,
    "Alone": 44967,
    "Funny": 58917,
    "Gaming": 40358,
    "Human": 4088,
    "India": 1331,
    "Joker": 77138,
    "Diet": 36206,
    "Legal": 11859,
    "Norman": 4939,
    "Tip": 72689,
    "Weight": 52343,
    "Movies": 46273,
    "Running": 23425,
    "Science": 2090,
    "Horror": 37793,
    "Confession": 60572,
    "Finance": 12250,
    "Politics": 16360,
    "Scary": 191985,
    "Support": 12654,
    "Technologies": 32516,
    "Teenage": 66160,
    "Event": 32769,
    "Learned": 67460,
    "Notion": 182770,
    "Wikipedia": 37583,
    "Books": 6665,
    "Extract": 76050,
    "Confessions": 102701,
    "Conspiracy": 75932,
    "Links": 63674,
    "Narcissus": 150425,
    "Relationship": 54766,
    "Relationships": 134796,
    "Reviews": 41671,
    "News": 4256,
    "Translation": 26820,
    "multilingual": 128406,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    *word* is a tuple of symbols (variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer based on Byte-Pair-Encoding.

    Args:
        vocab_file: Path to the vocabulary JSON file.
        merges_file: Path to the BPE merges file.
        unk_token: Token used for out-of-vocabulary symbols.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # first line is a version header, last line is empty
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair encoding to a single whitespace token; cached."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # lowest-rank (earliest-learned) merge first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]  # drop the trailing "</w>" marker
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split *text* on whitespace and BPE-encode each chunk."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into *save_directory*; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
707
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort *sequence* in place between indices *start* and *end* (inclusive)
    using the deliberately inefficient "slowsort" algorithm.

    Fixes vs. the original: the recursive calls now resolve (the function was
    defined under a mangled name while calling ``slowsort``), and the
    max-element swap actually writes back into the sequence instead of two
    throwaway locals.

    >>> seq = [5, 4, 3, 2, 1]; slowsort(seq); seq
    [1, 2, 3, 4, 5]
    >>> seq = []; slowsort(seq); seq
    []
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    # Put the larger of the two sub-range maxima at the end...
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    # ...then recursively sort everything before it.
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
29
0
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


# NOTE(review): class/base names and the boolean/None arguments below were
# restored from the upstream diffusers SD2 inpaint test (the obfuscated source
# used undefined placeholders) — verify against the repository history.
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for the Stable Diffusion 2 inpainting pipeline, built
    from tiny randomly initialised components."""

    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Return a dict of tiny pipeline components (UNet, scheduler, VAE,
        text encoder, tokenizer) seeded for determinism."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,  # 4 latent + 4 masked-image latent + 1 mask channel
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Create a deterministic prompt/image/mask input dict for ``device``."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        """A two-step CPU run must reproduce a known 3x3 output slice."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the released SD2 inpainting weights."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        # fp16 drifts much further from the fp32 reference, hence the loose bound.
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
708
from __future__ import annotations


def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    """Return the simple interest accrued on ``principal``.

    :param principal: amount lent, must be > 0
    :param daily_interest_rate: interest rate per day, must be >= 0
    :param days_between_payments: days interest accrues, must be > 0
    :raises ValueError: if any argument is outside its valid range

    >>> simple_interest(10000.0, 0.06, 3)
    1800.0
    """
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    """Return the compound interest (interest only, principal excluded)
    earned over ``number_of_compounding_periods`` at the per-period rate.

    :raises ValueError: if any argument is outside its valid range
    """
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    """Return the interest accrued over ``number_of_years`` with daily
    compounding at the given annual percentage rate (365-day year).

    :raises ValueError: if any argument is outside its valid range
    """
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    # Delegate: an APR compounded daily is a per-day rate over years*365 periods.
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
29
0
import argparse

import torch

# NOTE(review): the obfuscated source spelled these `WavaVeca*` — restored to
# the real transformers names; verify against the installed transformers version.
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    """Load a sequence-classification head from an s3prl downstream state dict."""
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Load an audio-frame-classification (diarization) head from s3prl weights."""
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Load an x-vector (speaker verification) head from s3prl weights."""
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    # One TDNN layer per configured kernel size.
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an s3prl downstream checkpoint into a saved HF model + feature extractor.

    Dispatches on the architecture declared in the HF config.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    # NOTE(review): return_attention_mask/do_normalize values restored from the
    # upstream conversion script — confirm.
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
709
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator: make ``func`` return its wall-clock duration in seconds.

    The wrapped function's own return value is discarded; only the elapsed
    time of one call is returned.
    """

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    """Return ``num_examples`` (index, example-dict) pairs of random data
    matching the given ``datasets`` feature schema.

    ``seq_shapes`` maps column name -> numpy shape for Sequence features.
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for k, v in features.items():
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # Unwrap nested Sequence features down to the scalar dtype.
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))
    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write ``num_examples`` random examples to an Arrow file at
    ``dataset_path`` and return the resulting ``datasets.Dataset``.

    :raises ValueError: if the writer did not persist exactly ``num_examples``
    """
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
29
0
import os
import unittest
from tempfile import TemporaryDirectory

import torch
import torch.nn as nn

from accelerate.utils import (
    OffloadedWeightsLoader,
    extract_submodules_state_dict,
    load_offloaded_weight,
    offload_state_dict,
    offload_weight,
)


class ModelForTest(nn.Module):
    """Tiny linear -> batchnorm -> linear model used as an offloading fixture.

    Attribute names (linear1/linear2) must match the state-dict keys the
    tests below look up on disk.
    """

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        """Offloading a state dict writes an index.json plus one .dat per tensor."""
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
            # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        """Round-trip a single tensor through offload_weight/load_offloaded_weight."""
        # NOTE(review): dtype list restored from the upstream accelerate test
        # (the obfuscated source mangled the digits) — confirm ordering.
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        """OffloadedWeightsLoader merges an in-memory dict with on-disk weights."""
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        """Extraction matches exact submodule prefixes, not string prefixes ('a.1' != 'a.10')."""
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
710
def min_path_sum(grid: list) -> int:
    """Return the minimum sum of a path from the top-left to the bottom-right
    cell of ``grid``, moving only right or down.  ``grid`` is modified in place.

    :raises TypeError: if the grid is empty or its first row is empty

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    # The first row can only be reached by moving right: prefix-sum it.
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]
    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """Fill ``current_row`` in place with the cheapest cumulative cost of
    reaching each cell, given the already-completed ``row_above``.

    >>> fill_row([2, 2, 2], [1, 2, 3])
    [3, 4, 5]
    """
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        # Cheapest predecessor is either the cell to the left or the one above.
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
29
0
"""Slow integration test for the Flax MT5 conditional-generation model."""
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    # NOTE(review): names restored from the digit-mangled source
    # (`FlaxMTaForConditionalGeneration`, `models.ta`) — verify against transformers.
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Check google/mt5-small reproduces a known cross-entropy score."""
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        # Negated total (not mean) log-likelihood, for comparison with the
        # reference Mesh-TensorFlow score.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
711
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures


logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    """Command-line arguments describing which GLUE task to load and how to
    tokenize it."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        # Task names are matched case-insensitively against the processor registry.
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    """Deprecated map-style dataset of pre-tokenized GLUE features with
    on-disk caching (use the 🤗 Datasets library instead)."""

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
29
0
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __A : '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ): '''simple docstring''' lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = seq_length lowerCamelCase__ = is_training lowerCamelCase__ = use_input_mask lowerCamelCase__ = use_token_type_ids lowerCamelCase__ = use_labels lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = type_vocab_size lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = num_labels lowerCamelCase__ = num_choices 
lowerCamelCase__ = scope def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ = None if self.use_input_mask: lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ = None if self.use_token_type_ids: lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self ): '''simple docstring''' return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = BioGptModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ ) lowerCamelCase__ = model(lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def 
__lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ): '''simple docstring''' lowerCamelCase__ = BioGptForCausalLM(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = BioGptModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() # create attention mask lowerCamelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCamelCase_ ) lowerCamelCase__ = self.seq_length // 2 lowerCamelCase__ = 0 # first forward pass lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).to_tuple() # create hypothetical next token and extent to next_input_ids lowerCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids lowerCamelCase__ = ids_tensor((1,) , lowerCamelCase_ ).item() + 1 lowerCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) lowerCamelCase__ = random_other_next_tokens # append to next input_ids and attn_mask lowerCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCamelCase__ = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCamelCase_ )] , dim=1 , ) # get two different outputs lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )["""last_hidden_state"""] lowerCamelCase__ = model(lowerCamelCase_ , past_key_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ )["""last_hidden_state"""] # 
select random slice lowerCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCamelCase__ = output_from_no_past[:, -1, random_slice_idx].detach() lowerCamelCase__ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = BioGptModel(config=lowerCamelCase_ ).to(lowerCamelCase_ ).eval() lowerCamelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCamelCase_ ) # first forward pass lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , use_cache=lowerCamelCase_ ) lowerCamelCase__ = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids lowerCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCamelCase__ = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and lowerCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCamelCase__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )["""last_hidden_state"""] lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ )[ """last_hidden_state""" ] # select random slice lowerCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCamelCase__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase , __lowerCAmelCase=False ): '''simple docstring''' lowerCamelCase__ = BioGptForCausalLM(lowerCamelCase_ ) model.to(lowerCamelCase_ ) if gradient_checkpointing: model.gradient_checkpointing_enable() lowerCamelCase__ = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def __lowerCamelCase ( self , __lowerCAmelCase , *__lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = BioGptModel(lowerCamelCase_ ) lowerCamelCase__ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.num_labels lowerCamelCase__ = BioGptForTokenClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.prepare_config_and_inputs() ( lowerCamelCase__ ) = config_and_inputs lowerCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __A ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, 
BioGptForTokenClassification) if is_torch_available() else () ) lowerCAmelCase_ = (BioGptForCausalLM,) if is_torch_available() else () lowerCAmelCase_ = ( { """feature-extraction""": BioGptModel, """text-classification""": BioGptForSequenceClassification, """text-generation""": BioGptForCausalLM, """token-classification""": BioGptForTokenClassification, """zero-shot""": BioGptForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = False def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = BioGptModelTester(self ) lowerCamelCase__ = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=3_7 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase__ = type self.model_tester.create_and_check_model(*lowerCamelCase_ ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCamelCase_ ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*lowerCamelCase_ , gradient_checkpointing=lowerCamelCase_ ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCamelCase_ ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCamelCase_ ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCamelCase_ ) @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(lowerCamelCase_ ) lowerCamelCase__ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) lowerCamelCase__ = """left""" # Define PAD Token = EOS Token = 50256 lowerCamelCase__ = tokenizer.eos_token lowerCamelCase__ = model.config.eos_token_id # use different length sentences to test batching lowerCamelCase__ = [ """Hello, my dog is a little""", """Today, I""", ] lowerCamelCase__ = tokenizer(lowerCamelCase_ , return_tensors='''pt''' , padding=lowerCamelCase_ ) lowerCamelCase__ = inputs["""input_ids"""].to(lowerCamelCase_ ) lowerCamelCase__ = model.generate( input_ids=lowerCamelCase_ , attention_mask=inputs['''attention_mask'''].to(lowerCamelCase_ ) , ) lowerCamelCase__ = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(lowerCamelCase_ ) lowerCamelCase__ = model.generate(input_ids=lowerCamelCase_ ) lowerCamelCase__ = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item() lowerCamelCase__ = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(lowerCamelCase_ ) lowerCamelCase__ = model.generate(input_ids=lowerCamelCase_ , max_length=model.config.max_length - num_paddings ) lowerCamelCase__ = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) lowerCamelCase__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCamelCase_ ) lowerCamelCase__ = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCamelCase_ ) lowerCamelCase__ = [ """Hello, my dog is a little bit bigger than a little bit.""", """Today, I have a good idea of how to use 
the information""", ] self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , [non_padded_sentence, padded_sentence] ) @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = BioGptModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = 3 lowerCamelCase__ = input_dict["""input_ids"""] lowerCamelCase__ = input_ids.ne(1 ).to(lowerCamelCase_ ) lowerCamelCase__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCamelCase__ = BioGptForSequenceClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = 3 lowerCamelCase__ = """multi_label_classification""" lowerCamelCase__ = input_dict["""input_ids"""] lowerCamelCase__ = input_ids.ne(1 ).to(lowerCamelCase_ ) lowerCamelCase__ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCamelCase__ = BioGptForSequenceClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class __A ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = 
BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) lowerCamelCase__ = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] ) lowerCamelCase__ = model(lowerCamelCase_ )[0] lowerCamelCase__ = 4_2_3_8_4 lowerCamelCase__ = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , lowerCamelCase_ ) lowerCamelCase__ = torch.tensor( [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 ) ) @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) lowerCamelCase__ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(lowerCamelCase_ ) torch.manual_seed(0 ) lowerCamelCase__ = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(lowerCamelCase_ ) lowerCamelCase__ = model.generate( **lowerCamelCase_ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=lowerCamelCase_ , ) lowerCamelCase__ = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCamelCase_ ) lowerCamelCase__ = ( """COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the""" """ causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and""" """ territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),""" """ and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and""" """ more than 800,000 deaths.""" ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
712
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets _a = datasets.logging.get_logger(__name__) _a = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n" _a = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. 
All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n" _a = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. 
Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . 
*)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n" def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=False ,__snake_case=False ,__snake_case=True ,__snake_case=False ,__snake_case="dummy_doc" ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase__ = {doc: key_lines} lowerCamelCase__ = {doc: sys_lines} lowerCamelCase__ = {} lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ , lowerCamelCase__ = reader.get_doc_mentions(__snake_case ,key_doc_lines[doc] ,__snake_case ) key_singletons_num += singletons_num if NP_only or min_span: lowerCamelCase__ = reader.set_annotated_parse_trees(__snake_case ,key_doc_lines[doc] ,__snake_case ,__snake_case ) lowerCamelCase__ , lowerCamelCase__ = reader.get_doc_mentions(__snake_case ,sys_doc_lines[doc] ,__snake_case ) sys_singletons_num += singletons_num if NP_only or min_span: lowerCamelCase__ = reader.set_annotated_parse_trees(__snake_case ,key_doc_lines[doc] ,__snake_case ,__snake_case ) if remove_nested: lowerCamelCase__ , lowerCamelCase__ = reader.remove_nested_coref_mentions(__snake_case ,__snake_case ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters lowerCamelCase__ , lowerCamelCase__ = reader.remove_nested_coref_mentions(__snake_case ,__snake_case ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters lowerCamelCase__ = reader.get_mention_assignments(__snake_case ,__snake_case ) lowerCamelCase__ = reader.get_mention_assignments(__snake_case ,__snake_case ) lowerCamelCase__ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( '''Number of removed nested coreferring mentions in the key ''' 
F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' ) logger.info( '''Number of resulting singleton clusters in the key ''' F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' ) if not keep_singletons: logger.info( F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ' '''files, respectively''' ) return doc_coref_infos def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> str: '''simple docstring''' lowerCamelCase__ = get_coref_infos(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) lowerCamelCase__ = {} lowerCamelCase__ = 0 lowerCamelCase__ = 0 for name, metric in metrics: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = evaluator.evaluate_documents(__snake_case ,__snake_case ,beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} ) logger.info( name.ljust(10 ) ,F'Recall: {recall * 100:.2f}' ,F' Precision: {precision * 100:.2f}' ,F' F1: {fa * 100:.2f}' ,) if conll_subparts_num == 3: lowerCamelCase__ = (conll / 3) * 100 logger.info(F'CoNLL score: {conll:.2f}' ) output_scores.update({'''conll_score''': conll} ) return output_scores def lowerCAmelCase__(__snake_case ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase__ = False for line in key_lines: if not line.startswith('''#''' ): if len(line.split() ) > 6: lowerCamelCase__ = line.split()[5] if not parse_col == "-": lowerCamelCase__ = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): '''simple docstring''' def __lowerCamelCase ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , 
inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ), '''references''': datasets.Sequence(datasets.Value('''string''' ) ), } ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[ '''https://github.com/ns-moosavi/coval''', '''https://www.aclweb.org/anthology/P16-1060''', '''http://www.conll.cemantix.org/2012/data.html''', ] , ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False ): '''simple docstring''' lowerCamelCase__ = [ ('''mentions''', evaluator.mentions), ('''muc''', evaluator.muc), ('''bcub''', evaluator.b_cubed), ('''ceafe''', evaluator.ceafe), ('''lea''', evaluator.lea), ] if min_span: lowerCamelCase__ = util.check_gold_parse_annotation(__lowerCAmelCase ) if not has_gold_parse: raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" lowerCamelCase__ = evaluate( key_lines=__lowerCAmelCase , sys_lines=__lowerCAmelCase , metrics=__lowerCAmelCase , NP_only=__lowerCAmelCase , remove_nested=__lowerCAmelCase , keep_singletons=__lowerCAmelCase , min_span=__lowerCAmelCase , ) return score
29
0
# -----------------------------------------------------------------------------
# VideoMAE checkpoint conversion script (flattened, machine-obfuscated copy).
# Intent: download an original VideoMAE checkpoint (Google Drive), remap its
# state-dict keys to the Transformers layout, verify logits against hard-coded
# expected values, then optionally save / push the converted model.
#
# NOTE(review): obfuscation replaced every local with `lowerCamelCase__` and
# helper references with `A__`, so names such as `model_name`, `config`,
# `idalabel`, `name`, `key`, `val`, `files`, `inputs` are read without any
# visible assignment. The code is NOT runnable as written; comments below
# describe the evident intent — confirm against the original script.
# -----------------------------------------------------------------------------
import argparse
import json

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    VideoMAEConfig,
    VideoMAEForPreTraining,
    VideoMAEForVideoClassification,
    VideoMAEImageProcessor,
)


def lowerCAmelCase__(__snake_case ) -> Dict:
    '''simple docstring'''
    # (get_videomae_config) Build a VideoMAEConfig for the given model name;
    # fine-tuned checkpoints additionally get id2label/label2id maps from the hub.
    lowerCamelCase__ = VideoMAEConfig()
    set_architecture_configs(A__ ,A__ )
    if "finetuned" not in model_name:
        # pre-training checkpoints have no classification head
        lowerCamelCase__ = False
    if "finetuned" in model_name:
        lowerCamelCase__ = '''huggingface/label-files'''
        if "kinetics" in model_name:
            lowerCamelCase__ = 400
            lowerCamelCase__ = '''kinetics400-id2label.json'''
        elif "ssv2" in model_name:
            lowerCamelCase__ = 174
            lowerCamelCase__ = '''something-something-v2-id2label.json'''
        else:
            raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
        lowerCamelCase__ = json.load(open(hf_hub_download(A__ ,A__ ,repo_type='''dataset''' ) ,'''r''' ) )
        lowerCamelCase__ = {int(A__ ): v for k, v in idalabel.items()}
        lowerCamelCase__ = idalabel
        lowerCamelCase__ = {v: k for k, v in idalabel.items()}
    return config


def lowerCAmelCase__(__snake_case ,__snake_case ) -> Dict:
    '''simple docstring'''
    # (set_architecture_configs) Pick hidden sizes / depths for the small,
    # large and huge variants; "base" keeps the config defaults.
    if "small" in model_name:
        lowerCamelCase__ = 384
        lowerCamelCase__ = 1536
        lowerCamelCase__ = 12
        lowerCamelCase__ = 16
        lowerCamelCase__ = 12
        lowerCamelCase__ = 3
        lowerCamelCase__ = 192
        lowerCamelCase__ = 768
    elif "large" in model_name:
        lowerCamelCase__ = 1024
        lowerCamelCase__ = 4096
        lowerCamelCase__ = 24
        lowerCamelCase__ = 16
        lowerCamelCase__ = 12
        lowerCamelCase__ = 8
        lowerCamelCase__ = 512
        lowerCamelCase__ = 2048
    elif "huge" in model_name:
        lowerCamelCase__ = 1280
        lowerCamelCase__ = 5120
        lowerCamelCase__ = 32
        lowerCamelCase__ = 16
        lowerCamelCase__ = 12
        lowerCamelCase__ = 8
        lowerCamelCase__ = 640
        lowerCamelCase__ = 2560
    elif "base" not in model_name:
        raise ValueError('''Model name should include either \"small\", \"base\", \"large\", or \"huge\"''' )


def lowerCAmelCase__(__snake_case ) -> Union[str, Any]:
    '''simple docstring'''
    # (rename_key) Map one original state-dict key onto the HF naming scheme.
    # Order matters: e.g. "decoder.blocks" must be handled before "blocks".
    if "encoder." in name:
        lowerCamelCase__ = name.replace('''encoder.''' ,'''''' )
    if "cls_token" in name:
        lowerCamelCase__ = name.replace('''cls_token''' ,'''videomae.embeddings.cls_token''' )
    if "decoder_pos_embed" in name:
        lowerCamelCase__ = name.replace('''decoder_pos_embed''' ,'''decoder.decoder_pos_embed''' )
    if "pos_embed" in name and "decoder" not in name:
        lowerCamelCase__ = name.replace('''pos_embed''' ,'''videomae.embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        lowerCamelCase__ = name.replace('''patch_embed.proj''' ,'''videomae.embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        lowerCamelCase__ = name.replace('''patch_embed.norm''' ,'''videomae.embeddings.norm''' )
    if "decoder.blocks" in name:
        lowerCamelCase__ = name.replace('''decoder.blocks''' ,'''decoder.decoder_layers''' )
    if "blocks" in name:
        lowerCamelCase__ = name.replace('''blocks''' ,'''videomae.encoder.layer''' )
    if "attn.proj" in name:
        lowerCamelCase__ = name.replace('''attn.proj''' ,'''attention.output.dense''' )
    if "attn" in name and "bias" not in name:
        lowerCamelCase__ = name.replace('''attn''' ,'''attention.self''' )
    if "attn" in name:
        lowerCamelCase__ = name.replace('''attn''' ,'''attention.attention''' )
    if "norm1" in name:
        lowerCamelCase__ = name.replace('''norm1''' ,'''layernorm_before''' )
    if "norm2" in name:
        lowerCamelCase__ = name.replace('''norm2''' ,'''layernorm_after''' )
    if "mlp.fc1" in name:
        lowerCamelCase__ = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
    if "mlp.fc2" in name:
        lowerCamelCase__ = name.replace('''mlp.fc2''' ,'''output.dense''' )
    if "decoder_embed" in name:
        lowerCamelCase__ = name.replace('''decoder_embed''' ,'''decoder.decoder_embed''' )
    if "decoder_norm" in name:
        lowerCamelCase__ = name.replace('''decoder_norm''' ,'''decoder.decoder_norm''' )
    if "decoder_pred" in name:
        lowerCamelCase__ = name.replace('''decoder_pred''' ,'''decoder.decoder_pred''' )
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        lowerCamelCase__ = name.replace('''norm.weight''' ,'''videomae.layernorm.weight''' )
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        lowerCamelCase__ = name.replace('''norm.bias''' ,'''videomae.layernorm.bias''' )
    if "head" in name and "decoder" not in name:
        lowerCamelCase__ = name.replace('''head''' ,'''classifier''' )
    return name


def lowerCAmelCase__(__snake_case ,__snake_case ) -> Optional[int]:
    '''simple docstring'''
    # (convert_state_dict) Rewrite the whole state dict; fused qkv weights are
    # split into separate query / key / value slices of width `dim`.
    for key in orig_state_dict.copy().keys():
        lowerCamelCase__ = orig_state_dict.pop(A__ )
        if key.startswith('''encoder.''' ):
            lowerCamelCase__ = key.replace('''encoder.''' ,'''''' )
        if "qkv" in key:
            lowerCamelCase__ = key.split('''.''' )
            if key.startswith('''decoder.blocks''' ):
                lowerCamelCase__ = config.decoder_hidden_size
                lowerCamelCase__ = int(key_split[2] )
                lowerCamelCase__ = '''decoder.decoder_layers.'''
                if "weight" in key:
                    # q / k / v occupy consecutive `dim`-sized row blocks
                    lowerCamelCase__ = val[:dim, :]
                    lowerCamelCase__ = val[dim : dim * 2, :]
                    lowerCamelCase__ = val[-dim:, :]
            else:
                lowerCamelCase__ = config.hidden_size
                lowerCamelCase__ = int(key_split[1] )
                lowerCamelCase__ = '''videomae.encoder.layer.'''
                if "weight" in key:
                    lowerCamelCase__ = val[:dim, :]
                    lowerCamelCase__ = val[dim : dim * 2, :]
                    lowerCamelCase__ = val[-dim:, :]
        else:
            lowerCamelCase__ = val
    return orig_state_dict


def lowerCAmelCase__() -> Any:
    '''simple docstring'''
    # (prepare_video) Fetch a small test video (numpy array of frames) from the hub.
    lowerCamelCase__ = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' ,filename='''eating_spaghetti.npy''' ,repo_type='''dataset''' )
    lowerCamelCase__ = np.load(A__ )
    return list(A__ )


def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> List[Any]:
    '''simple docstring'''
    # (convert_videomae_checkpoint) End-to-end conversion + verification.
    lowerCamelCase__ = get_videomae_config(A__ )
    if "finetuned" in model_name:
        lowerCamelCase__ = VideoMAEForVideoClassification(A__ )
    else:
        lowerCamelCase__ = VideoMAEForPreTraining(A__ )
    # download original checkpoint, hosted on Google Drive
    lowerCamelCase__ = '''pytorch_model.bin'''
    gdown.cached_download(A__ ,A__ ,quiet=A__ )
    lowerCamelCase__ = torch.load(A__ ,map_location='''cpu''' )
    # checkpoints store weights under either "model" or "module"
    if "model" in files:
        lowerCamelCase__ = files['''model''']
    else:
        lowerCamelCase__ = files['''module''']
    lowerCamelCase__ = convert_state_dict(A__ ,A__ )
    model.load_state_dict(A__ )
    model.eval()
    # verify model on basic input
    lowerCamelCase__ = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
    lowerCamelCase__ = prepare_video()
    lowerCamelCase__ = image_processor(A__ ,return_tensors='''pt''' )
    if "finetuned" not in model_name:
        # pre-training models need a boolean mask of patches to reconstruct
        lowerCamelCase__ = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' ,filename='''bool_masked_pos.pt''' )
        lowerCamelCase__ = torch.load(A__ )
    lowerCamelCase__ = model(**A__ )
    lowerCamelCase__ = outputs.logits
    lowerCamelCase__ = [
        '''videomae-small-finetuned-kinetics''',
        '''videomae-small-finetuned-ssv2''',
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        '''videomae-base-short''',
        '''videomae-base-short-finetuned-kinetics''',
        '''videomae-base''',
        '''videomae-base-finetuned-kinetics''',
        '''videomae-large''',
        '''videomae-large-finetuned-kinetics''',
        '''videomae-huge-finetuned-kinetics''',
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        '''videomae-base-short-ssv2''',
        '''videomae-base-short-finetuned-ssv2''',
        '''videomae-base-ssv2''',
        '''videomae-base-finetuned-ssv2''',
    ]
    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        lowerCamelCase__ = torch.Size([1, 400] )
        lowerCamelCase__ = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
    elif model_name == "videomae-small-finetuned-ssv2":
        lowerCamelCase__ = torch.Size([1, 174] )
        lowerCamelCase__ = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
    elif model_name == "videomae-base":
        lowerCamelCase__ = torch.Size([1, 1408, 1536] )
        lowerCamelCase__ = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
    elif model_name == "videomae-base-short":
        lowerCamelCase__ = torch.Size([1, 1408, 1536] )
        lowerCamelCase__ = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
        # we verified the loss both for normalized and unnormalized targets for this one
        lowerCamelCase__ = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
    elif model_name == "videomae-large":
        lowerCamelCase__ = torch.Size([1, 1408, 1536] )
        lowerCamelCase__ = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
    elif model_name == "videomae-large-finetuned-kinetics":
        lowerCamelCase__ = torch.Size([1, 400] )
        lowerCamelCase__ = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
    elif model_name == "videomae-huge-finetuned-kinetics":
        lowerCamelCase__ = torch.Size([1, 400] )
        lowerCamelCase__ = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
    elif model_name == "videomae-base-short-finetuned-kinetics":
        lowerCamelCase__ = torch.Size([1, 400] )
        lowerCamelCase__ = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
    elif model_name == "videomae-base-finetuned-kinetics":
        lowerCamelCase__ = torch.Size([1, 400] )
        lowerCamelCase__ = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
    elif model_name == "videomae-base-short-ssv2":
        lowerCamelCase__ = torch.Size([1, 1408, 1536] )
        lowerCamelCase__ = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
    elif model_name == "videomae-base-short-finetuned-ssv2":
        lowerCamelCase__ = torch.Size([1, 174] )
        lowerCamelCase__ = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
    elif model_name == "videomae-base-ssv2":
        lowerCamelCase__ = torch.Size([1, 1408, 1536] )
        lowerCamelCase__ = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
    elif model_name == "videomae-base-finetuned-ssv2":
        lowerCamelCase__ = torch.Size([1, 174] )
        lowerCamelCase__ = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
    else:
        raise ValueError(F'Model name not supported. Should be one of {model_names}' )
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3] ,A__ ,atol=1E-4 )
    else:
        print('''Logits:''' ,logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3] ,A__ ,atol=1E-4 )
    print('''Logits ok!''' )
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        lowerCamelCase__ = outputs.loss
        assert torch.allclose(A__ ,A__ ,atol=1E-4 )
        print('''Loss ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
        image_processor.save_pretrained(A__ )
        model.save_pretrained(A__ )
    if push_to_hub:
        print('''Pushing to the hub...''' )
        model.push_to_hub(A__ ,organization='''nielsr''' )


if __name__ == "__main__":
    # NOTE(review): obfuscation also hit this section -- the parser is bound to
    # `_a` but used as `parser`, and the parsed namespace is used as `args`.
    _a = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    _a = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
713
# This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests _a = open # noqa: we just need to have a builtin inside this module to test it properly
29
0
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    """Entry point of the `accelerate` CLI.

    Builds the top-level argument parser, registers every subcommand parser,
    parses ``sys.argv`` and dispatches to the selected subcommand's ``func``.
    Prints help and exits with status 1 when no subcommand was given.

    Fixes vs. the previous (obfuscated) copy: the function is named ``main``
    so the ``__main__`` guard below actually resolves; the parser/namespace
    are bound to the ``parser``/``args`` names the body reads; and
    ``allow_abbrev`` is an explicit ``False`` (prefix abbreviation of
    subcommand options is unwanted for a CLI multiplexer) instead of an
    undefined name.
    """
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        # no subcommand supplied -> show usage and fail
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
714
# Parallel map helpers for `datasets`: split work across a multiprocessing
# Pool by default, or delegate to a joblib backend selected with
# `parallel_backend(...)`.
#
# Fixes vs. the previous (obfuscated) copy: every function had all of its
# parameters named `__snake_case` (duplicate parameter names are a
# SyntaxError); the config class was defined as `__A` but referenced as
# `ParallelBackendConfig`; the logger was bound to `_a` but used as `logger`;
# and the helper functions called below were never bound under their real
# names. Parameter names are restored from the names each body reads.
import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging

_a = logging.get_logger(__name__)
logger = _a  # the functions below read `logger`; keep `_a` for any external reference


class ParallelBackendConfig:
    # Name of the joblib backend set by `parallel_backend`; None means "use
    # the default multiprocessing Pool path".
    backend_name = None


__A = ParallelBackendConfig  # backward-compatible alias for the old obfuscated name


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply ``single_map_nested_func`` over ``iterable`` with ``num_proc`` workers.

    Dispatches to joblib when a backend was activated via ``parallel_backend``,
    otherwise to a plain multiprocessing Pool.
    """
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Split ``iterable`` into ``num_proc`` contiguous slices and map them in a Pool."""
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        # the first `mod` slices get one extra element each
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        # share tqdm's lock with the workers so progress bars don't interleave
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map over ``iterable`` using the joblib backend recorded in ParallelBackendConfig.

    progress bar is not displayed (disable_tqdm/desc are ignored on this path).
    """
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """Context manager that routes `parallel_map` through a joblib backend."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        # always restore the default Pool path, even on error
        ParallelBackendConfig.backend_name = None
29
0
# ffmpeg-based audio helpers: decode arbitrary audio bytes to float32 PCM,
# and stream microphone audio as overlapping chunks for live inference.
#
# Fixes vs. the previous (obfuscated) copy: every function had all parameters
# named `__snake_case` (duplicate parameter names are a SyntaxError), all
# functions were defined under the single name `lowerCAmelCase__`, yet the
# bodies call `ffmpeg_microphone`, `chunk_bytes_iter` and `_ffmpeg_stream` by
# their real names. The real function/parameter names are restored from those
# internal call sites; runtime behavior and message strings are unchanged.
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode ``bpayload`` (any ffmpeg-readable container) to mono float32 PCM.

    Raises ValueError if ffmpeg is missing or the payload decodes to nothing.
    """
    ar = f"{sampling_rate}"
    ac = "1"  # mono
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Yield raw byte chunks of ``chunk_length_s`` seconds read from the default microphone."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    # pick the platform-specific ffmpeg capture device
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Stream microphone audio as dicts with overlapping strides for live ASR.

    Each yielded item has "raw" (numpy audio), "stride" (left/right overlap in
    samples), "sampling_rate", and "partial" when a chunk is still incomplete.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk a byte iterator into ``chunk_len``-sized pieces overlapping by ``stride``.

    With ``stream=True``, partially-filled chunks are yielded early with
    ``"partial": True`` so downstream consumers can show intermediate results.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0  # first chunk has no left context yet
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                # keep the overlap region for the next chunk's left context
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Spawn ffmpeg and yield its stdout in ``buflen``-byte reads until EOF."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
715
# Temporal transformer block (diffusers-style): attends across the frame axis
# of video latents. Flattened, machine-obfuscated copy.
#
# NOTE(review): obfuscation gave both classes the name `__A`, named every
# parameter of `__init__`/`forward` `__lowerCAmelCase` (duplicate parameter
# names are a SyntaxError), and assigned every attribute/local to
# `lowerCamelCase__`, so names like `num_attention_heads`, `batch_frames`,
# `self.norm`, `residual` are read with no visible binding. The code is NOT
# runnable as written; comments describe the evident intent -- confirm
# against the original module.
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class __A ( lowerCAmelCase ):
    '''simple docstring'''

    # (TransformerTemporalModelOutput) single field: the output sample tensor
    lowerCAmelCase_ = 42


class __A ( lowerCAmelCase , lowerCAmelCase ):
    '''simple docstring'''

    # (TransformerTemporalModel) group-norm -> linear proj_in -> N transformer
    # blocks over the temporal axis -> linear proj_out + residual.
    @register_to_config
    def __init__( self , __lowerCAmelCase = 1_6 , __lowerCAmelCase = 8_8 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 1 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 3_2 , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = "geglu" , __lowerCAmelCase = True , __lowerCAmelCase = True , ):
        '''simple docstring'''
        super().__init__()
        lowerCamelCase__ = num_attention_heads
        lowerCamelCase__ = attention_head_dim
        # inner dimension of the attention blocks
        lowerCamelCase__ = num_attention_heads * attention_head_dim
        lowerCamelCase__ = in_channels
        lowerCamelCase__ = torch.nn.GroupNorm(num_groups=__lowerCAmelCase , num_channels=__lowerCAmelCase , eps=1E-6 , affine=__lowerCAmelCase )
        lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
        # 3. Define transformers blocks
        lowerCamelCase__ = nn.ModuleList(
            [
                BasicTransformerBlock(
                    __lowerCAmelCase ,
                    __lowerCAmelCase ,
                    __lowerCAmelCase ,
                    dropout=__lowerCAmelCase ,
                    cross_attention_dim=__lowerCAmelCase ,
                    activation_fn=__lowerCAmelCase ,
                    attention_bias=__lowerCAmelCase ,
                    double_self_attention=__lowerCAmelCase ,
                    norm_elementwise_affine=__lowerCAmelCase ,
                )
                for d in range(__lowerCAmelCase )
            ] )
        lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=1 , __lowerCAmelCase=None , __lowerCAmelCase = True , ):
        '''simple docstring'''
        # (forward) reshape (batch*frames, C, H, W) so that attention runs over
        # the frame axis, then undo the reshape and add the residual.
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = hidden_states.shape
        lowerCamelCase__ = batch_frames // num_frames
        lowerCamelCase__ = hidden_states
        lowerCamelCase__ = hidden_states[None, :].reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        lowerCamelCase__ = self.norm(__lowerCAmelCase )
        # flatten spatial dims into the batch so each pixel attends across frames
        lowerCamelCase__ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , __lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = self.proj_in(__lowerCAmelCase )
        # 2. Blocks
        for block in self.transformer_blocks:
            lowerCamelCase__ = block(
                __lowerCAmelCase ,
                encoder_hidden_states=__lowerCAmelCase ,
                timestep=__lowerCAmelCase ,
                cross_attention_kwargs=__lowerCAmelCase ,
                class_labels=__lowerCAmelCase ,
            )
        # 3. Output
        lowerCamelCase__ = self.proj_out(__lowerCAmelCase )
        lowerCamelCase__ = (
            hidden_states[None, None, :]
            .reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        lowerCamelCase__ = hidden_states.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=__lowerCAmelCase )
29
0
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class __A(SequenceFeatureExtractor):
    """CLAP-style audio feature extractor.

    Converts raw mono audio into log-mel spectrogram ``input_features`` and a
    per-sample ``is_longer`` flag. Clips longer than the maximum length are
    either randomly cropped (``"rand_trunc"``) or turned into a 4-channel
    "fusion" stack of a shrunk global view plus three random chunks
    (``"fusion"``); shorter clips are padded (optionally by repetition).
    """

    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=6_4,
        sampling_rate=4_8_0_0_0,
        hop_length=4_8_0,
        max_length_s=1_0,
        fft_window_size=1_0_2_4,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 1_4_0_0_0,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # A real FFT of N samples yields N/2 + 1 frequency bins.
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # Two filter banks: HTK-scaled (used for "fusion") and Slaney-normalized
        # (used for the single-channel / "rand_trunc" path).
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this instance to a dict, dropping the large (and fully
        recomputable) mel filter banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        """Compute a dB-scaled log-mel spectrogram of `waveform`.

        Returns the transposed spectrogram, shape (num_frames, num_mel_bins).
        """
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Build the 4-channel fusion input from an over-long mel spectrogram:
        one globally shrunk view plus three randomly located chunks (front,
        middle, back thirds of the clip)."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        # Shrink the whole spectrogram to chunk size via bilinear interpolation.
        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 6_4], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        """Truncate or pad one waveform, then extract its mel features.

        Returns:
            (input_mel, longer): the mel features (4 channels for "fusion",
            1 channel otherwise) and whether the clip exceeded `max_length`.
        """
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length+hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual
            # max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms.

        Raises:
            ValueError: if `sampling_rate` disagrees with the extractor's
                configured rate, or a multi-channel array is passed.
        """
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
716
_a = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" _a = [{"type": "code", "content": INSTALL_CONTENT}] _a = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
29
0
from __future__ import annotations def lowerCAmelCase__(__snake_case ,__snake_case ) -> set[str]: '''simple docstring''' lowerCamelCase__ = set(__snake_case ), [start] while stack: lowerCamelCase__ = stack.pop() explored.add(__snake_case ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(__snake_case ) return explored _a = { 'A': ['B', 'C', 'D'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F'], 'D': ['B', 'D'], 'E': ['B', 'F'], 'F': ['C', 'E', 'G'], 'G': ['F'], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, "A"))
717
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available _a = { "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoForCausalLM", "GPTNeoForQuestionAnswering", "GPTNeoForSequenceClassification", "GPTNeoForTokenClassification", "GPTNeoModel", "GPTNeoPreTrainedModel", "load_tf_weights_in_gpt_neo", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ "FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys _a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
29
0
from ...processing_utils import ProcessorMixin class __A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' lowerCAmelCase_ = ["""image_processor""", """feature_extractor"""] lowerCAmelCase_ = """TvltImageProcessor""" lowerCAmelCase_ = """TvltFeatureExtractor""" def __init__( self , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' super().__init__(image_processor=snake_case__ , feature_extractor=snake_case__ ) lowerCamelCase__ = image_processor lowerCamelCase__ = feature_extractor def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=False , *__lowerCAmelCase , **__lowerCAmelCase , ): '''simple docstring''' if images is None and audio is None: raise ValueError('''You need to specify either an `images` or `audio` input to process.''' ) lowerCamelCase__ = None if images is not None: lowerCamelCase__ = self.image_processor(snake_case__ , mask_pixel=snake_case__ , *snake_case__ , **snake_case__ ) if images_mixed is not None: lowerCamelCase__ = self.image_processor(snake_case__ , is_mixed=snake_case__ , *snake_case__ , **snake_case__ ) if audio is not None: lowerCamelCase__ = self.feature_extractor( snake_case__ , *snake_case__ , sampling_rate=snake_case__ , mask_audio=snake_case__ , **snake_case__ ) lowerCamelCase__ = {} if audio is not None: output_dict.update(snake_case__ ) if images is not None: output_dict.update(snake_case__ ) if images_mixed_dict is not None: output_dict.update(snake_case__ ) return output_dict @property def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.image_processor.model_input_names lowerCamelCase__ = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
718
import warnings from ...utils import logging from .image_processing_owlvit import OwlViTImageProcessor _a = logging.get_logger(__name__) class __A ( lowerCAmelCase ): '''simple docstring''' def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ): '''simple docstring''' warnings.warn( '''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use OwlViTImageProcessor instead.''' , __lowerCAmelCase , ) super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
29
0
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

# Lazily-built map str(device) -> device. Module-level because
# `jaxlib.xla_extension.Device` is not serializable with pickle/dill, so it
# cannot live on (picklable) formatter instances.
DEVICE_MAPPING: Optional[dict] = None


class __A(TensorFormatter[Mapping, "jax.Array", Mapping]):
    """Formatter that converts Arrow-backed dataset rows/columns/batches into
    JAX arrays placed on a chosen device."""

    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        # Devices must be passed as their string identifier, never as the raw
        # (unpicklable) Device object.
        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        """Map each available JAX device to its string identifier."""
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype JAX arrays into one array;
        return the input unchanged otherwise."""
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        """Convert a single leaf value into a JAX array on `self.device`."""
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        """Tensorize every leaf of a (possibly nested) structure."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
719
# Usage:
# ./gen-card-allenai-wmt16.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    """Render and write the README.md model card for one allenai WMT16 FSMT model.

    Args:
        model_card_dir: `Path` of the directory to write README.md into
            (created if missing).
        src_lang: source language code (e.g. "en").
        tgt_lang: target language code (e.g. "de").
        model_name: model identifier; must be a key of the BLEU `scores` table.
    """
    # Sample sentences shown in the usage snippet of the card.
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n    title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n    author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n    year={{2020}},\n    eprint={{2006.10369}},\n    archivePrefix={{arXiv}},\n    primaryClass={{cs.CL}}\n}}\n```\n\n'

    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


if __name__ == "__main__":
    # Guarded so importing this module has no side effects; running it as a
    # script regenerates all three allenai WMT16 model cards.
    # make sure we are under the root of the project
    repo_dir = Path(__file__).resolve().parent.parent.parent
    model_cards_dir = repo_dir / "model_cards"

    for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
        model_card_dir = model_cards_dir / "allenai" / model_name
        write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
29
0
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin _a = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right _a = 250_004 _a = 250_020 @require_sentencepiece @require_tokenizers class __A ( _snake_case , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = MBartaaTokenizer lowerCAmelCase_ = MBartaaTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True def __lowerCamelCase ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__ = MBartaaTokenizer(snake_case_ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=snake_case_ ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = "<s>" lowerCamelCase__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(snake_case_ ) , 1_0_5_4 ) def __lowerCamelCase ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4 ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = MBartaaTokenizer(snake_case_ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=snake_case_ ) lowerCamelCase__ = tokenizer.tokenize('''This is a test''' ) 
self.assertListEqual(snake_case_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) lowerCamelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( snake_case_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) lowerCamelCase__ = tokenizer.convert_tokens_to_ids(snake_case_ ) self.assertListEqual( snake_case_ , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) lowerCamelCase__ = tokenizer.convert_ids_to_tokens(snake_case_ ) self.assertListEqual( snake_case_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = {"input_ids": [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 
4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case_ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , ) def __lowerCamelCase ( self ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCamelCase__ = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ ) lowerCamelCase__ = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ ) lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = tokenizer_r.save_pretrained(snake_case_ ) lowerCamelCase__ = tokenizer_p.save_pretrained(snake_case_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) lowerCamelCase__ = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(snake_case_ , snake_case_ ) # Checks everything loads correctly in the same way lowerCamelCase__ = tokenizer_r.from_pretrained(snake_case_ ) lowerCamelCase__ = tokenizer_p.from_pretrained(snake_case_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case_ , snake_case_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, 
key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(snake_case_ ) # Save tokenizer rust, legacy_format=True lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = tokenizer_r.save_pretrained(snake_case_ , legacy_format=snake_case_ ) lowerCamelCase__ = tokenizer_p.save_pretrained(snake_case_ ) # Checks it save with the same files self.assertSequenceEqual(snake_case_ , snake_case_ ) # Checks everything loads correctly in the same way lowerCamelCase__ = tokenizer_r.from_pretrained(snake_case_ ) lowerCamelCase__ = tokenizer_p.from_pretrained(snake_case_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case_ , snake_case_ ) ) shutil.rmtree(snake_case_ ) # Save tokenizer rust, legacy_format=False lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = tokenizer_r.save_pretrained(snake_case_ , legacy_format=snake_case_ ) lowerCamelCase__ = tokenizer_p.save_pretrained(snake_case_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowerCamelCase__ = tokenizer_r.from_pretrained(snake_case_ ) lowerCamelCase__ = tokenizer_p.from_pretrained(snake_case_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case_ , snake_case_ ) ) shutil.rmtree(snake_case_ ) @require_torch @require_sentencepiece @require_tokenizers class __A ( unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = """facebook/mbart-large-50-one-to-many-mmt""" lowerCAmelCase_ = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the 
violence and misery for millions of people.""", ] lowerCAmelCase_ = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei""" """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor""" """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] lowerCAmelCase_ = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2] @classmethod def __lowerCamelCase ( cls ): '''simple docstring''' lowerCamelCase__ = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) lowerCamelCase__ = 1 return cls def __lowerCamelCase ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 2_5_0_0_3_8 ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case_ ) def __lowerCamelCase ( self ): '''simple docstring''' self.assertIn(snake_case_ , self.tokenizer.all_special_ids ) lowerCamelCase__ = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2] lowerCamelCase__ = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ ) lowerCamelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) self.assertNotIn(self.tokenizer.eos_token , snake_case_ ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ["this is gunna be a long sentence " * 2_0] assert 
isinstance(src_text[0] , snake_case_ ) lowerCamelCase__ = 1_0 lowerCamelCase__ = self.tokenizer(snake_case_ , max_length=snake_case_ , truncation=snake_case_ ).input_ids[0] self.assertEqual(ids[0] , snake_case_ ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(snake_case_ ) , snake_case_ ) def __lowerCamelCase ( self ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_5_3, 2_5_0_0_0_1] ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(snake_case_ ) lowerCamelCase__ = MBartaaTokenizer.from_pretrained(snake_case_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case_ ) @require_torch def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors='''pt''' ) lowerCamelCase__ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=snake_case_ , truncation=snake_case_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) lowerCamelCase__ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(snake_case_ , snake_case_ ) self.assertEqual((2, 1_4) , batch.input_ids.shape ) self.assertEqual((2, 1_4) , batch.attention_mask.shape ) lowerCamelCase__ = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , snake_case_ ) 
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer(self.src_text , padding=snake_case_ , truncation=snake_case_ , max_length=3 , return_tensors='''pt''' ) lowerCamelCase__ = self.tokenizer( text_target=self.tgt_text , padding=snake_case_ , truncation=snake_case_ , max_length=1_0 , return_tensors='''pt''' ) lowerCamelCase__ = targets["input_ids"] lowerCamelCase__ = shift_tokens_right(snake_case_ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 ) @require_torch def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(snake_case_ ) , { # en_XX, A, test, EOS '''input_ids''': [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 2_5_0_0_0_1, } , )
720
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


_a = logging.get_logger(__name__)


class __A(SegformerImageProcessor):
    """Deprecated alias for ``SegformerImageProcessor``.

    Kept only for backward compatibility: constructing it behaves exactly like
    constructing ``SegformerImageProcessor`` but emits a deprecation warning.
    """

    def __init__(self, *args, **kwargs):
        # Fixes two defects in the original: the same identifier was used for
        # both *args and **kwargs (a SyntaxError), and the positional-args
        # tuple was passed where the warning category belongs.  FutureWarning
        # is the conventional category for a to-be-removed class
        # (NOTE(review): category inferred from the deprecation message —
        # confirm against the surrounding project's convention).
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        # Delegate everything else to the real image processor.
        super().__init__(*args, **kwargs)
29
0
import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class __A ( unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = MODEL_FOR_MASKED_LM_MAPPING lowerCAmelCase_ = TF_MODEL_FOR_MASKED_LM_MAPPING def __lowerCamelCase ( self ): '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' ) lowerCamelCase__ = unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=6 ) , [ {'''sequence''': '''My name is grouped''', '''score''': 2.1E-05, '''token''': 3_8_0_1_5, '''token_str''': ''' grouped'''}, {'''sequence''': '''My name is accuser''', '''score''': 2.1E-05, '''token''': 2_5_5_0_6, '''token_str''': ''' accuser'''}, ] , ) lowerCamelCase__ = unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=6 ) , [ { '''sequence''': '''The largest city in France is grouped''', '''score''': 2.1E-05, '''token''': 3_8_0_1_5, '''token_str''': ''' grouped''', }, { '''sequence''': '''The largest city in France is accuser''', '''score''': 2.1E-05, '''token''': 2_5_5_0_6, '''token_str''': ''' accuser''', }, ] , ) lowerCamelCase__ = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=6 ) , [ {'''sequence''': '''My name is 
Clara''', '''score''': 2E-05, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''}, {'''sequence''': '''My name is Patrick''', '''score''': 2E-05, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Te''', '''score''': 1.9E-05, '''token''': 2_9_4_1, '''token_str''': ''' Te'''}, ] , ) @require_torch def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' ) lowerCamelCase__ = unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=6 ) , [ {'''sequence''': '''My name is Maul''', '''score''': 2.2E-05, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul'''}, {'''sequence''': '''My name isELS''', '''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''}, ] , ) lowerCamelCase__ = unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=6 ) , [ { '''sequence''': '''The largest city in France is Maul''', '''score''': 2.2E-05, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul''', }, {'''sequence''': '''The largest city in France isELS''', '''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''}, ] , ) lowerCamelCase__ = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=6 ) , [ {'''sequence''': '''My name is Patrick''', '''score''': 2.1E-05, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Te''', '''score''': 2E-05, '''token''': 2_9_4_1, '''token_str''': ''' Te'''}, {'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''}, ] , ) lowerCamelCase__ = unmasker('''My name is <mask> <mask>''' , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=6 ) , [ [ { 
'''score''': 2.2E-05, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul''', '''sequence''': '''<s>My name is Maul<mask></s>''', }, {'''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''}, ], [ { '''score''': 2.2E-05, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul''', '''sequence''': '''<s>My name is<mask> Maul</s>''', }, {'''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''}, ], ] , ) @require_torch_gpu def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' ) # convert model to fp16 pipe.model.half() lowerCamelCase__ = pipe('''Paris is the [MASK] of France.''' ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) @slow @require_torch def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' ) self.run_large_test(UpperCAmelCase__ ) @slow @require_tf def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' ) self.run_large_test(UpperCAmelCase__ ) def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(UpperCAmelCase__ ) , [ {'''sequence''': '''My name is John''', '''score''': 0.008, '''token''': 6_1_0, '''token_str''': ''' John'''}, {'''sequence''': '''My name is Chris''', '''score''': 0.007, '''token''': 1_5_7_3, '''token_str''': ''' Chris'''}, ] , ) lowerCamelCase__ = unmasker('''The largest city in France is 
<mask>''' ) self.assertEqual( nested_simplify(UpperCAmelCase__ ) , [ { '''sequence''': '''The largest city in France is Paris''', '''score''': 0.251, '''token''': 2_2_0_1, '''token_str''': ''' Paris''', }, { '''sequence''': '''The largest city in France is Lyon''', '''score''': 0.214, '''token''': 1_2_7_9_0, '''token_str''': ''' Lyon''', }, ] , ) lowerCamelCase__ = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 ) self.assertEqual( nested_simplify(UpperCAmelCase__ ) , [ {'''sequence''': '''My name is Patrick''', '''score''': 0.005, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Clara''', '''score''': 0.000, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''}, {'''sequence''': '''My name is Te''', '''score''': 0.000, '''token''': 2_9_4_1, '''token_str''': ''' Te'''}, ] , ) @require_torch def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' ) lowerCamelCase__ = None lowerCamelCase__ = None self.run_pipeline_test(UpperCAmelCase__ , [] ) @require_tf def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' ) lowerCamelCase__ = None lowerCamelCase__ = None self.run_pipeline_test(UpperCAmelCase__ , [] ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' ) lowerCamelCase__ = FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ ) lowerCamelCase__ = [ F'This is another {tokenizer.mask_token} test', ] return fill_masker, examples def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' 
lowerCamelCase__ = fill_masker.tokenizer lowerCamelCase__ = fill_masker.model lowerCamelCase__ = fill_masker( F'This is a {tokenizer.mask_token}' , ) self.assertEqual( UpperCAmelCase__ , [ {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, ] , ) lowerCamelCase__ = fill_masker([F'This is a {tokenizer.mask_token}'] ) self.assertEqual( UpperCAmelCase__ , [ {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, ] , ) lowerCamelCase__ = fill_masker([F'This is a {tokenizer.mask_token}', F'Another {tokenizer.mask_token} great test.'] ) 
self.assertEqual( UpperCAmelCase__ , [ [ {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, ], [ {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, ], ] , ) with self.assertRaises(UpperCAmelCase__ ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(UpperCAmelCase__ ): fill_masker('''This is''' ) self.run_test_top_k(UpperCAmelCase__ , UpperCAmelCase__ ) self.run_test_targets(UpperCAmelCase__ , UpperCAmelCase__ ) self.run_test_top_k_targets(UpperCAmelCase__ , UpperCAmelCase__ ) 
self.fill_mask_with_duplicate_targets_and_top_k(UpperCAmelCase__ , UpperCAmelCase__ ) self.fill_mask_with_multiple_masks(UpperCAmelCase__ , UpperCAmelCase__ ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = tokenizer.get_vocab() lowerCamelCase__ = sorted(vocab.keys() )[:2] # Pipeline argument lowerCamelCase__ = FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , targets=UpperCAmelCase__ ) lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' ) self.assertEqual( UpperCAmelCase__ , [ {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, ] , ) lowerCamelCase__ = {vocab[el] for el in targets} self.assertEqual({el['''token'''] for el in outputs} , UpperCAmelCase__ ) lowerCamelCase__ = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['''token_str'''] for el in outputs} , set(UpperCAmelCase__ ) ) # Call argument lowerCamelCase__ = FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ ) lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' , targets=UpperCAmelCase__ ) self.assertEqual( UpperCAmelCase__ , [ {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, ] , ) lowerCamelCase__ = {vocab[el] for el in targets} self.assertEqual({el['''token'''] for el in outputs} , UpperCAmelCase__ ) lowerCamelCase__ = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['''token_str'''] for el in outputs} , 
set(UpperCAmelCase__ ) ) # Score equivalence lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' , targets=UpperCAmelCase__ ) lowerCamelCase__ = [top_mask['''token_str'''] for top_mask in outputs] lowerCamelCase__ = [top_mask['''score'''] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(UpperCAmelCase__ ) == set(UpperCAmelCase__ ): lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' , targets=UpperCAmelCase__ ) lowerCamelCase__ = [top_mask['''score'''] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(UpperCAmelCase__ ) , nested_simplify(UpperCAmelCase__ ) ) # Raises with invalid with self.assertRaises(UpperCAmelCase__ ): lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(UpperCAmelCase__ ): lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' , targets=[''''''] ) with self.assertRaises(UpperCAmelCase__ ): lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' , targets='''''' ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , top_k=2 ) lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' ) self.assertEqual( UpperCAmelCase__ , [ {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, ] , ) lowerCamelCase__ = FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ ) lowerCamelCase__ = fill_masker(F'This is a 
{tokenizer.mask_token}' , top_k=2 ) self.assertEqual( UpperCAmelCase__ , [ {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, ] , ) self.assertEqual(nested_simplify(UpperCAmelCase__ ) , nested_simplify(UpperCAmelCase__ ) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = tokenizer.get_vocab() lowerCamelCase__ = FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ ) # top_k=2, ntargets=3 lowerCamelCase__ = sorted(vocab.keys() )[:3] lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' , top_k=2 , targets=UpperCAmelCase__ ) # If we use the most probably targets, and filter differently, we should still # have the same results lowerCamelCase__ = [el['''token_str'''] for el in sorted(UpperCAmelCase__ , key=lambda __lowerCAmelCase : x["score"] , reverse=UpperCAmelCase__ )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(UpperCAmelCase__ ).issubset(UpperCAmelCase__ ): lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' , top_k=3 , targets=UpperCAmelCase__ ) # They should yield exactly the same result self.assertEqual(nested_simplify(UpperCAmelCase__ ) , nested_simplify(UpperCAmelCase__ ) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ ) lowerCamelCase__ = tokenizer.get_vocab() # String duplicates + id duplicates lowerCamelCase__ = sorted(vocab.keys() )[:3] lowerCamelCase__ = [targets[0], targets[1], targets[0], targets[2], targets[1]] lowerCamelCase__ = fill_masker(F'My name is {tokenizer.mask_token}' , targets=UpperCAmelCase__ , top_k=1_0 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(UpperCAmelCase__ ) , 3 ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ ) lowerCamelCase__ = fill_masker( F'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 ) self.assertEqual( UpperCAmelCase__ , [ [ {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, ], [ {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, ], [ {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': 
ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, {'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )}, ], ] , )
721
from queue import PriorityQueue from typing import Any import numpy as np def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) -> float | int: '''simple docstring''' for nxt, d in graph[v]: if nxt in visited_forward: continue lowerCamelCase__ = cst_fwd.get(__snake_case ,np.inf ) lowerCamelCase__ = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) lowerCamelCase__ = new_cost_f lowerCamelCase__ = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: lowerCamelCase__ = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> int: '''simple docstring''' lowerCamelCase__ = -1 lowerCamelCase__ = set() lowerCamelCase__ = set() lowerCamelCase__ = {source: 0} lowerCamelCase__ = {destination: 0} lowerCamelCase__ = {source: None} lowerCamelCase__ = {destination: None} lowerCamelCase__ = PriorityQueue() lowerCamelCase__ = PriorityQueue() lowerCamelCase__ = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): lowerCamelCase__ , lowerCamelCase__ = queue_forward.get() visited_forward.add(__snake_case ) lowerCamelCase__ , lowerCamelCase__ = queue_backward.get() visited_backward.add(__snake_case ) lowerCamelCase__ = pass_and_relaxation( __snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) lowerCamelCase__ = pass_and_relaxation( __snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: lowerCamelCase__ = shortest_distance return shortest_path_distance _a = { "B": [["C", 1]], "C": [["D", 1]], "D": [["F", 
1]], "E": [["B", 1], ["G", 2]], "F": [], "G": [["F", 1]], } _a = { "B": [["E", 1]], "C": [["B", 1]], "D": [["C", 1]], "F": [["D", 1], ["G", 1]], "E": [[None, np.inf]], "G": [["E", 2]], } if __name__ == "__main__": import doctest doctest.testmod()
29
0
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class __A ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' ) lowerCamelCase__ = AutoTokenizer.from_pretrained('''google/mt5-small''' ) lowerCamelCase__ = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids lowerCamelCase__ = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids lowerCamelCase__ = model(_lowerCamelCase , labels=_lowerCamelCase ).loss lowerCamelCase__ = -tf.math.reduce_mean(_lowerCamelCase ).numpy() lowerCamelCase__ = -21.22_8168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
700
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class __A(ProcessorMixin):
    """Processor wrapping a CLAP feature extractor and a RoBERTa tokenizer.

    FIX: the base class was an undefined mangled name; the two class
    attributes were both bound to the same mangled identifier (the second
    shadowing the first) where ProcessorMixin requires
    `feature_extractor_class` and `tokenizer_class`; and the locals
    `encoding` / `audio_features` were never actually bound.
    """

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or extract features from `audios`.

        Raises:
            ValueError: if neither `text` nor `audios` is provided.
        """
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            # Merge the audio features into the text encoding.
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and feature-extractor input names,
        # deduplicated while preserving order.
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
29
0
"""Count primes expressible as a difference of consecutive cubes:
(n+1)^3 - n^3 = 3n^2 + 3n + 1 (7, 19, 37, 61, ...)."""

from math import isqrt


def is_prime(number: int) -> bool:
    """Trial-division primality test up to isqrt(number).

    FIX: restored the name referenced by `solution` (both functions shared
    one mangled name) and guarded values below 2, which the bare `all(...)`
    would wrongly report as prime.
    """
    if number < 2:
        return False
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Return how many cube-difference candidates below `max_prime` are prime.

    Candidates are generated directly: starting at 7 (= 2^3 - 1^3) with
    successive gaps of 6 * cube_index.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
701
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class __A : '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , __lowerCAmelCase=0 , ): '''simple docstring''' lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = seq_length lowerCamelCase__ = is_training lowerCamelCase__ = use_input_mask lowerCamelCase__ = use_token_type_ids lowerCamelCase__ = use_labels lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = type_vocab_size lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = num_labels lowerCamelCase__ = 
num_choices lowerCamelCase__ = scope lowerCamelCase__ = projection_dim def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ = None if self.use_input_mask: # follow test_modeling_tf_ctrl.py lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ = None if self.use_token_type_ids: lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) lowerCamelCase__ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFDPRContextEncoder(config=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase 
) lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFDPRQuestionEncoder(config=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFDPRReader(config=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = config_and_inputs lowerCamelCase__ = {'''input_ids''': input_ids} return config, inputs_dict @require_tf class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) lowerCAmelCase_ = {"""feature-extraction""": TFDPRQuestionEncoder} if 
is_tf_available() else {} lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFDPRModelTester(self ) lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*__lowerCAmelCase ) @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRQuestionEncoder.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRReader.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class __A ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = 
TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' ) lowerCamelCase__ = tf.constant( [[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP] lowerCamelCase__ = model(__lowerCAmelCase )[0] # embedding shape = (1, 768) # compare the actual values for a slice. lowerCamelCase__ = tf.constant( [ [ 0.0323_6253, 0.1275_3335, 0.1681_8509, 0.0027_9786, 0.389_6933, 0.2426_4945, 0.217_8971, -0.0233_5227, -0.0848_1959, -0.1432_4117, ] ] ) self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
29
0
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# FIX: the import-structure dict was repeatedly rebound to a throwaway name
# (`_a`) -- the torch branch even clobbered it with a list -- while
# `_LazyModule` below consumed an undefined `_import_structure`.  The
# canonical lazy-init pattern is restored.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import (
            MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MCTCTForCTC,
            MCTCTModel,
            MCTCTPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
702
"""Simple TF-IDF building blocks."""

import string
from math import log10  # FIX: `math` has no `logaa`; the digit-mangled name is restored.


def term_frequency(term: str, document: str) -> int:
    """Count case-insensitive occurrences of `term` in `document`.

    Punctuation and newlines are stripped before whitespace tokenization.

    FIX: all four functions in this module shared one mangled name, so only
    the last definition survived; distinct names are restored.
    """
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents).

    Documents are the newline-separated sections of `corpus`.
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return log10(n / df) rounded to 3 places (1 + log10(n / (1 + df)) when smoothed).

    Raises:
        ZeroDivisionError: if `df` is 0 and smoothing is off.
        ValueError: if `n` is 0.
    """
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Return the tf-idf score, rounded to 3 places."""
    return round(tf * idf, 3)
29
0
"""Create the photographic negative of an image using OpenCV."""

# FIX: `cva` is not an importable module; the digit-mangled OpenCV name
# (`cv2`) is restored.
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    """Invert every pixel in-place and return the image.

    Each channel value v becomes 255 - v.  Assumes a 3-channel (BGR) numpy
    image as produced by `imread` -- TODO confirm against other callers.
    """
    # FIX: loop bounds referenced undefined mangled names; derive them from
    # the image shape, and restore the function name called under __main__.
    height, width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
703
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# FIX: every optional branch rebound the throwaway name `_a`, clobbering the
# import-structure dict, while `_LazyModule` below consumed an undefined
# `_import_structure`.  The canonical lazy-init pattern is restored.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
29
0
from __future__ import annotations from typing import TypedDict class __A ( _UpperCAmelCase ): '''simple docstring''' lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 def lowerCAmelCase__(__snake_case ) -> Optional[Any]: '''simple docstring''' if not isinstance(__snake_case ,__snake_case ): raise TypeError('''The parameter s type must be str.''' ) return [s[i:] + s[:i] for i in range(len(__snake_case ) )] def lowerCAmelCase__(__snake_case ) -> Union[str, Any]: '''simple docstring''' if not isinstance(__snake_case ,__snake_case ): raise TypeError('''The parameter s type must be str.''' ) if not s: raise ValueError('''The parameter s must not be empty.''' ) lowerCamelCase__ = all_rotations(__snake_case ) rotations.sort() # sort the list of rotations in alphabetically order # make a string composed of the last char of each rotation lowerCamelCase__ = { '''bwt_string''': ''''''.join([word[-1] for word in rotations] ), '''idx_original_string''': rotations.index(__snake_case ), } return response def lowerCAmelCase__(__snake_case ,__snake_case ) -> Tuple: '''simple docstring''' if not isinstance(__snake_case ,__snake_case ): raise TypeError('''The parameter bwt_string type must be str.''' ) if not bwt_string: raise ValueError('''The parameter bwt_string must not be empty.''' ) try: lowerCamelCase__ = int(__snake_case ) except ValueError: raise TypeError( '''The parameter idx_original_string type must be int or passive''' ''' of cast to int.''' ) if idx_original_string < 0: raise ValueError('''The parameter idx_original_string must not be lower than 0.''' ) if idx_original_string >= len(__snake_case ): raise ValueError( '''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''' ) lowerCamelCase__ = [''''''] * len(__snake_case ) for _ in range(len(__snake_case ) ): for i in range(len(__snake_case ) ): lowerCamelCase__ = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": _a = 
"Provide a string that I will generate its BWT transform: " _a = input(entry_msg).strip() _a = bwt_transform(s) print( f"""Burrows Wheeler transform for string '{s}' results """ f"""in '{result['bwt_string']}'""" ) _a = reverse_bwt(result["bwt_string"], result["idx_original_string"]) print( f"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """ f"""we get original string '{original_string}'""" )
704
import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict _a = namedtuple( "_TestCommandArgs", [ "dataset", "name", "cache_dir", "data_dir", "all_configs", "save_infos", "ignore_verifications", "force_redownload", "clear_cache", ], defaults=[None, None, None, False, False, False, False, False], ) def lowerCAmelCase__(__snake_case ,__snake_case ) -> List[str]: '''simple docstring''' return (abs(source - target ) / target) < 0.0_1 @pytest.mark.integration def lowerCAmelCase__(__snake_case ) -> Tuple: '''simple docstring''' lowerCamelCase__ = _TestCommandArgs(dataset=__snake_case ,all_configs=__snake_case ,save_infos=__snake_case ) lowerCamelCase__ = TestCommand(*__snake_case ) test_command.run() lowerCamelCase__ = os.path.join(__snake_case ,'''README.md''' ) assert os.path.exists(__snake_case ) lowerCamelCase__ = DatasetInfosDict.from_directory(__snake_case ) lowerCamelCase__ = DatasetInfosDict( { '''default''': DatasetInfo( features=Features( { '''tokens''': Sequence(Value('''string''' ) ), '''ner_tags''': Sequence( ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ), '''langs''': Sequence(Value('''string''' ) ), '''spans''': Sequence(Value('''string''' ) ), } ) ,splits=[ { '''name''': '''train''', '''num_bytes''': 2351563, '''num_examples''': 10000, }, { '''name''': '''validation''', '''num_bytes''': 238418, '''num_examples''': 1000, }, ] ,download_size=3940680 ,dataset_size=2589981 ,) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: lowerCamelCase__ , lowerCamelCase__ = getattr(dataset_infos['''default'''] ,__snake_case ), getattr(expected_dataset_infos['''default'''] ,__snake_case ) if key == "num_bytes": assert is_apercent_close(__snake_case ,__snake_case ) elif key == "splits": 
assert list(__snake_case ) == list(__snake_case ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes ) else: result == expected
29
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# FIX: the dict was bound to a throwaway name while `_LazyModule` below
# consumed an undefined `_import_structure`; the TYPE_CHECKING imports also
# referenced digit-mangled module/class names (`swinva` / `Swinva*`)
# inconsistent with the exported strings.
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
705
from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class __A : '''simple docstring''' def __init__( self , __lowerCAmelCase , ): '''simple docstring''' lowerCamelCase__ = parent lowerCamelCase__ = 1_3 lowerCamelCase__ = 7 lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = 9_9 lowerCamelCase__ = 3_2 lowerCamelCase__ = 2 lowerCamelCase__ = 4 lowerCamelCase__ = 3_7 lowerCamelCase__ = '''gelu''' lowerCamelCase__ = 0.1 lowerCamelCase__ = 0.1 lowerCamelCase__ = 5_1_2 lowerCamelCase__ = 1_6 lowerCamelCase__ = 2 lowerCamelCase__ = 0.02 lowerCamelCase__ = 3 lowerCamelCase__ = 4 lowerCamelCase__ = None def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ = None if self.use_input_mask: lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self ): '''simple docstring''' ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = self.prepare_config_and_inputs() lowerCamelCase__ = True lowerCamelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase ) lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ = [input_ids, input_mask] lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ): '''simple docstring''' lowerCamelCase__ = True lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase ) lowerCamelCase__ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''encoder_hidden_states''': encoder_hidden_states, 
'''encoder_attention_mask''': encoder_attention_mask, } lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ = [input_ids, input_mask] lowerCamelCase__ = model(__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase ) # Also check the case where encoder outputs are not passed lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFEsmForMaskedLM(config=__lowerCAmelCase ) lowerCamelCase__ = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.num_labels lowerCamelCase__ = TFEsmForTokenClassification(config=__lowerCAmelCase ) lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = config_and_inputs lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) 
lowerCAmelCase_ = ( { """feature-extraction""": TFEsmModel, """fill-mask""": TFEsmForMaskedLM, """text-classification""": TFEsmForSequenceClassification, """token-classification""": TFEsmForTokenClassification, """zero-shot""": TFEsmForSequenceClassification, } if is_tf_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFEsmModelTester(self ) lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFEsmModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @unittest.skip('''Protein models do not support embedding resizing.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''Protein models do not support embedding resizing.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(__lowerCAmelCase ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer lowerCamelCase__ = model.get_bias() assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) for k, v in name.items(): assert isinstance(__lowerCAmelCase , tf.Variable ) else: lowerCamelCase__ = model.get_output_embeddings() assert x is None lowerCamelCase__ = model.get_bias() assert name is None @require_tf class __A ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) lowerCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase__ = model(__lowerCAmelCase )[0] lowerCamelCase__ = [1, 6, 3_3] self.assertEqual(list(output.numpy().shape ) , __lowerCAmelCase ) # compare the actual values for a slice. lowerCamelCase__ = tf.constant( [ [ [8.92_1518, -10.58_9814, -6.467_1307], [-6.396_7156, -13.91_1377, -1.121_1915], [-7.78_1247, -13.95_1557, -3.74_0592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) lowerCamelCase__ = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] ) lowerCamelCase__ = model(__lowerCAmelCase )[0] # compare the actual values for a slice. lowerCamelCase__ = tf.constant( [ [ [0.1444_3092, 0.5412_5327, 0.324_7739], [0.3034_0484, 0.0052_6676, 0.3107_7722], [0.3227_8043, -0.2498_7096, 0.341_4628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
29
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Lazy-import registry: maps submodule name -> public names it exports.
# Restored from mangled residue: the torch/flax branches previously discarded
# their results and `_import_structure` was never defined at the use site.
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch not installed: simply do not expose the PyTorch model.
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # flax not installed: do not expose the Flax model.
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    # Static type checkers see the real imports directly.
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
706
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime (requires number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    # Trial division up to sqrt(number) is sufficient.
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status


def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: all primes from 2 up to ``n`` (n > 2)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    # actual sieve of erathostenes: zero out every multiple of a survivor
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def get_prime_numbers(n: int) -> list:
    """Return all primes in [2, n] by individual primality tests."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def prime_factorization(number: int) -> list:
    """Return the prime factorization of ``number`` as a list of factors."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # divisibility was just checked, so integer division is exact
                # (avoids float drift of the former '/=').
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = max(prime_factorization(number))

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = min(prime_factorization(number))

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def is_even(number: int) -> bool:
    """Return True if ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True if ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0


def goldbach(number: int) -> list:
    """Return two primes whose sum is ``number`` (Goldbach; number even, > 2)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans


def gcd(numbera: int, numbera_: int) -> int:
    """Euclidean algorithm: greatest common divisor of two non-negative ints."""
    assert (
        isinstance(numbera, int)
        and isinstance(numbera_, int)
        and (numbera >= 0)
        and (numbera_ >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while numbera_ != 0:
        rest = numbera % numbera_
        numbera = numbera_
        numbera_ = rest

    # precondition
    assert isinstance(numbera, int) and (
        numbera >= 0
    ), "'number' must been from type int and positive"

    return numbera


def kg_v(numbera: int, numbera_: int) -> int:
    """Return the least common multiple of two positive integers."""
    assert (
        isinstance(numbera, int)
        and isinstance(numbera_, int)
        and (numbera >= 1)
        and (numbera_ >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if numbera > 1 and numbera_ > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(numbera)
        prime_fac_b = prime_factorization(numbera_)

    elif numbera == 1 or numbera_ == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(numbera, numbera_)

    count_a = 0
    count_b = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n)
                count_b = prime_fac_b.count(n)

                for _ in range(max(count_a, count_b)):
                    ans *= n

            else:
                count_a = prime_fac_a.count(n)

                for _ in range(count_a):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n)

            for _ in range(count_b):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans


def get_prime(n: int) -> int:
    """Return the n-th prime number (0-indexed: get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans


def get_primes_between(p_number_a: int, p_number_a_: int) -> list:
    """Return all primes strictly between two given primes (exclusive)."""
    assert (
        is_prime(p_number_a) and is_prime(p_number_a_) and (p_number_a < p_number_a_)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_a + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_a_:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_a
        and ans[len(ans) - 1] != p_number_a_
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    """Return all divisors of ``n`` (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    """Return True if ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduce a fraction to lowest terms; returns (numerator, denominator)."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Return n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Return a Fibonacci number (fib(0) == fib(1) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fiba = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fiba
        fiba = tmp

    return ans
29
0
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path

import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser

# Whitespace is stripped before hashing so formatting-only changes dedupe.
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Hash the whitespace-normalized content (md5 hex digest)."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Mean and max line length of the file content."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Fraction of alphanumeric characters in the content."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Consume the example's hash from `uniques`; True only for its first occurrence."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Scan the first `scan_width` lines for auto-generation markers."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Heuristics for config/test files: header keywords, or many 'config'/'test' tokens."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test: explicit keywords near the top of the file
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test: token frequency relative to file length
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """True when the file has none of the basic Python structure keywords."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """True when the file contains at most `minimum` '=' characters."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Characters per token; low values suggest unusual/near-binary content."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Compute all per-example statistics used by the filter below."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Keep an example only if it is unique and passes all quality heuristics."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Gzip `file_path` in place (writes file_path.gz, removes the original)."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
707
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence[start:end+1]` in place with the (deliberately slow) slowsort.

    Restored the recursive name: the mangled def could not be reached by its
    own recursive calls.

    :param sequence: list to sort in place
    :param start: first index of the slice (defaults to 0)
    :param end: last index of the slice (defaults to len(sequence) - 1)
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    # ensure the maximum of the slice sits at `end`, then recurse on the rest
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
29
0
import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt.

    NOTE(review): float sqrt can misclassify very large integers;
    `math.isqrt(num) ** 2 == num` is exact — confirm before relying on this
    for big inputs.
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square via integer binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
708
from __future__ import annotations def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> float: '''simple docstring''' if days_between_payments <= 0: raise ValueError('''days_between_payments must be > 0''' ) if daily_interest_rate < 0: raise ValueError('''daily_interest_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * daily_interest_rate * days_between_payments def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,) -> float: '''simple docstring''' if number_of_compounding_periods <= 0: raise ValueError('''number_of_compounding_periods must be > 0''' ) if nominal_annual_interest_rate_percentage < 0: raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,) -> float: '''simple docstring''' if number_of_years <= 0: raise ValueError('''number_of_years must be > 0''' ) if nominal_annual_percentage_rate < 0: raise ValueError('''nominal_annual_percentage_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return compound_interest( __snake_case ,nominal_annual_percentage_rate / 365 ,number_of_years * 365 ) if __name__ == "__main__": import doctest doctest.testmod()
29
0
# Shell snippet injected at the top of generated notebooks to install deps.
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

# Cells prepended to every generated notebook.
# (Restored name: the mangled residue referenced INSTALL_CONTENT while all
# three constants were bound to the same placeholder `_a`.)
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Doc placeholders that formatting should leave untouched, mapped to fakes.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
709
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator: calling the wrapped function returns its wall-clock duration
    in seconds (the original return value is discarded)."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    # keep the original name so benchmark reports stay readable
    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    """Create `num_examples` random (index, example) pairs matching `features`.

    `seq_shapes` maps column name -> shape for Sequence features.
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # unwrap nested Sequence features down to the inner Value
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write a random dataset with `features` to `dataset_path` and load it back."""
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
29
0
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime (6k ± 1 trial division)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the ``nth`` prime number (Project Euler problem 7).

    Restored the call to ``is_prime`` broken by name mangling.
    """
    count = 0
    number = 1
    # handle 2 (the only even prime) separately, then step by 2
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
710
def min_path_sum(grid: list) -> int:
    """Return the minimal path sum from top-left to bottom-right of `grid`,
    moving only right or down. Mutates `grid` in place (dynamic programming).

    :raises TypeError: if the grid is empty or has an empty first row.
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    # first row: only reachable from the left
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]

    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """Accumulate the minimal cost into `current_row` given the row above."""
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
29
0
"""GPT-2-based text decoder with a learned prefix, used to decode text from
CLIP-style embeddings via beam search. Restored from mangled residue: every
signature repeated the same placeholder parameter (a SyntaxError), and
`GPTaConfig`/`torch.intaa` are the mangled `GPT2Config`/`torch.int64`."""
from typing import Optional

import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class __A(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """Prefix-conditioned GPT-2 LM head used as a text decoder."""

    # GPT-2 attention biases are buffers, not weights; ignore them on load.
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        # Without a hidden projection, the prefix must already match n_embd.
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        """Run the LM on [projected prefix ; token embeddings].

        Returns `(out, hidden)` when a prefix hidden projection exists,
        otherwise just the GPT-2 output.
        """
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            # pad labels so they align with the prefix positions
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        """Zero labels for the prefix positions (ignored by the loss via value)."""
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        """Project a raw prefix into the (optional) hidden prefix space."""
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Decode one caption per feature row via beam search; returns stacked
        token tensors and their sequence lengths."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        """Beam search decoding; returns (tokens sorted by score desc, lengths)."""
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                # first step: expand the single hypothesis to beam_size beams
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # finished beams only propose padding with score 0
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
711
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures


# NOTE(review): the module logger is bound to `_a`, but the bodies below call
# `logger.info(...)` — one of the two names is an obfuscation artifact; confirm
# against the un-obfuscated original.
_a = logging.get_logger(__name__)


@dataclass
class __A :
    '''Command-line arguments for a GLUE task.

    NOTE(review): all field names were obfuscated to `lowerCAmelCase_`; the
    metadata help strings indicate the originals were task_name / data_dir /
    max_seq_length / overwrite_cache, and `default=lowerCAmelCase` below is an
    undefined name (presumably `False`).
    '''

    lowerCAmelCase_ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
    lowerCAmelCase_ = field(
        metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
    lowerCAmelCase_ = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    lowerCAmelCase_ = field(
        default=lowerCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )

    def __lowerCamelCase ( self ):
        '''Normalise the task name to lower case (originally __post_init__).'''
        lowerCamelCase__ = self.task_name.lower()


class __A ( lowerCAmelCase ):
    '''Dataset split selector (originally an Enum: train / dev / test).'''

    lowerCAmelCase_ = """train"""
    lowerCAmelCase_ = """dev"""
    lowerCAmelCase_ = """test"""


class __A ( lowerCAmelCase ):
    '''GLUE torch Dataset: tokenises examples once and caches the features on disk.

    NOTE(review): assignment targets were obfuscated to `lowerCamelCase__` while
    reads keep the original attribute names (`self.processor`, `self.features`,
    ...); the class is not runnable as shown.
    '''

    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42

    def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = Split.train , __lowerCAmelCase = None , ):
        '''Load features from the cache file if present, otherwise build and save them.

        Parameters (inferred from the body — confirm): args, tokenizer,
        limit_length, mode (Split or its name), cache_dir.
        '''
        warnings.warn(
            '''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
            '''library. You can have a look at this example script for pointers: '''
            '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , __lowerCAmelCase , )
        lowerCamelCase__ = args
        lowerCamelCase__ = glue_processors[args.task_name]()
        lowerCamelCase__ = glue_output_modes[args.task_name]
        # Accept the split either as a Split member or as its string name.
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            try:
                lowerCamelCase__ = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''' )
        # Load data features from cache or dataset file
        lowerCamelCase__ = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir ,
            F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
        lowerCamelCase__ = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            lowerCamelCase__ , lowerCamelCase__ = label_list[2], label_list[1]
        lowerCamelCase__ = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lowerCamelCase__ = cached_features_file + '''.lock'''
        with FileLock(__lowerCAmelCase ):
            if os.path.exists(__lowerCAmelCase ) and not args.overwrite_cache:
                lowerCamelCase__ = time.time()
                lowerCamelCase__ = torch.load(__lowerCAmelCase )
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
            else:
                logger.info(F'Creating features from dataset file at {args.data_dir}' )
                if mode == Split.dev:
                    lowerCamelCase__ = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    lowerCamelCase__ = self.processor.get_test_examples(args.data_dir )
                else:
                    lowerCamelCase__ = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    lowerCamelCase__ = examples[:limit_length]
                lowerCamelCase__ = glue_convert_examples_to_features(
                    __lowerCAmelCase , __lowerCAmelCase , max_length=args.max_seq_length , label_list=__lowerCAmelCase , output_mode=self.output_mode , )
                lowerCamelCase__ = time.time()
                torch.save(self.features , __lowerCAmelCase )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )

    def __len__( self ):
        '''Number of cached feature rows.'''
        return len(self.features )

    def __getitem__( self , __lowerCAmelCase ):
        '''Return feature row `i` (NOTE(review): the parameter name was obfuscated
        away from `i`, which the body still reads).'''
        return self.features[i]

    def __lowerCamelCase ( self ):
        '''Return the task's label list (originally get_labels).'''
        return self.label_list
29
0
import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class __A ( unittest.TestCase , _UpperCamelCase ): '''simple docstring''' def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = load_tool('''text-to-speech''' ) self.tool.setup() def __lowerCamelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ = self.tool('''hey''' ) lowerCamelCase__ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) ) def __lowerCamelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ = self.tool('''hey''' ) lowerCamelCase__ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
712
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets _a = datasets.logging.get_logger(__name__) _a = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n" _a = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. 
All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n" _a = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. 
Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . 
*)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n" def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=False ,__snake_case=False ,__snake_case=True ,__snake_case=False ,__snake_case="dummy_doc" ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase__ = {doc: key_lines} lowerCamelCase__ = {doc: sys_lines} lowerCamelCase__ = {} lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ , lowerCamelCase__ = reader.get_doc_mentions(__snake_case ,key_doc_lines[doc] ,__snake_case ) key_singletons_num += singletons_num if NP_only or min_span: lowerCamelCase__ = reader.set_annotated_parse_trees(__snake_case ,key_doc_lines[doc] ,__snake_case ,__snake_case ) lowerCamelCase__ , lowerCamelCase__ = reader.get_doc_mentions(__snake_case ,sys_doc_lines[doc] ,__snake_case ) sys_singletons_num += singletons_num if NP_only or min_span: lowerCamelCase__ = reader.set_annotated_parse_trees(__snake_case ,key_doc_lines[doc] ,__snake_case ,__snake_case ) if remove_nested: lowerCamelCase__ , lowerCamelCase__ = reader.remove_nested_coref_mentions(__snake_case ,__snake_case ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters lowerCamelCase__ , lowerCamelCase__ = reader.remove_nested_coref_mentions(__snake_case ,__snake_case ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters lowerCamelCase__ = reader.get_mention_assignments(__snake_case ,__snake_case ) lowerCamelCase__ = reader.get_mention_assignments(__snake_case ,__snake_case ) lowerCamelCase__ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( '''Number of removed nested coreferring mentions in the key ''' 
            # NOTE(review): tail of `get_coref_infos` — its `def` line and the first
            # half of this logger call sit above this chunk; comments here only cover
            # the visible part.
            F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
    if not keep_singletons:
        logger.info(
            F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
            '''files, respectively''' )
    return doc_coref_infos


def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> str:
    '''Run every (name, metric) pair over the documents and collect per-metric
    recall/precision/F1 plus the averaged CoNLL score (originally `evaluate`).

    NOTE(review): obfuscation artifact — assignment targets were rewritten to
    `lowerCamelCase__` while reads keep the original names (`output_scores`,
    `conll`, `doc_coref_infos`, ...); not runnable as shown.
    '''
    lowerCamelCase__ = get_coref_infos(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
    lowerCamelCase__ = {}
    lowerCamelCase__ = 0
    lowerCamelCase__ = 0
    for name, metric in metrics:
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = evaluator.evaluate_documents(__snake_case ,__snake_case ,beta=1 )
        # The CoNLL score averages the F1 of MUC, B-cubed and CEAFe only.
        if name in ["muc", "bcub", "ceafe"]:
            conll += fa
            conll_subparts_num += 1
        output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} )
        logger.info(
            name.ljust(10 ) ,F'Recall: {recall * 100:.2f}' ,F' Precision: {precision * 100:.2f}' ,F' F1: {fa * 100:.2f}' ,)
    if conll_subparts_num == 3:
        lowerCamelCase__ = (conll / 3) * 100
        logger.info(F'CoNLL score: {conll:.2f}' )
        output_scores.update({'''conll_score''': conll} )
    return output_scores


def lowerCAmelCase__(__snake_case ) -> Union[str, Any]:
    '''Return True when column 5 of the first full data line carries a gold
    parse (anything other than "-"); only the first non-comment line with more
    than 6 columns is inspected.'''
    lowerCamelCase__ = False
    for line in key_lines:
        if not line.startswith('''#''' ):
            if len(line.split() ) > 6:
                lowerCamelCase__ = line.split()[5]
                if not parse_col == "-":
                    lowerCamelCase__ = True
                break
            else:
                break
    return has_gold_parse


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
    '''CoVal coreference metric wrapper (MUC, B-cubed, CEAFe, LEA, CoNLL).'''

    def __lowerCamelCase ( self ):
        '''Declare the metric's input features and reference URLs (originally _info).'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''string''' ) ),
                } ) ,
            codebase_urls=['''https://github.com/ns-moosavi/coval'''] ,
            reference_urls=[
                '''https://github.com/ns-moosavi/coval''',
                '''https://www.aclweb.org/anthology/P16-1060''',
                '''http://www.conll.cemantix.org/2012/data.html''',
            ] , )

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False ):
        '''Compute all CoVal sub-metrics (originally _compute); `min_span` requires
        gold parse annotation in the references, otherwise it raises.'''
        lowerCamelCase__ = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]
        if min_span:
            lowerCamelCase__ = util.check_gold_parse_annotation(__lowerCAmelCase )
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        lowerCamelCase__ = evaluate(
            key_lines=__lowerCAmelCase , sys_lines=__lowerCAmelCase , metrics=__lowerCAmelCase , NP_only=__lowerCAmelCase , remove_nested=__lowerCAmelCase , keep_singletons=__lowerCAmelCase , min_span=__lowerCAmelCase , )

        return score
29
0
import unittest

from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:
    class __A :
        '''Stand-in used when vision dependencies are missing.'''

        @staticmethod
        def __lowerCamelCase ( *__lowerCAmelCase , **__lowerCAmelCase ):
            '''Accept any arguments and do nothing (presumably Image.open — confirm).'''
            pass


@is_pipeline_test
@require_vision
@require_torch
class __A ( unittest.TestCase ):
    '''Pipeline tests for zero-shot object detection.

    NOTE(review): obfuscation artifacts — every method is named
    `__lowerCamelCase` (later defs shadow earlier ones), assignment targets were
    rewritten to `lowerCamelCase__` while reads use `object_detector`/`outputs`,
    and asserted values are passed as the undefined name `_A`; not runnable as
    shown.
    '''

    lowerCAmelCase_ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        '''Build the tiny test pipeline plus one example input.'''
        lowerCamelCase__ = pipeline(
            '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
        lowerCamelCase__ = [
            {
                '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
                '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
            }
        ]
        return object_detector, examples

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
        '''With threshold 0 detections must be non-empty, each with score/label/box.'''
        lowerCamelCase__ = object_detector(examples[0] , threshold=0.0 )
        lowerCamelCase__ = len(_A )
        self.assertGreater(_A , 0 )
        self.assertEqual(
            _A , [
                {
                    '''score''': ANY(_A ),
                    '''label''': ANY(_A ),
                    '''box''': {'''xmin''': ANY(_A ), '''ymin''': ANY(_A ), '''xmax''': ANY(_A ), '''ymax''': ANY(_A )},
                }
                for i in range(_A )
            ] , )

    @require_tf
    @unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def __lowerCamelCase ( self ):
        '''TF backend: intentionally skipped.'''
        pass

    @require_torch
    def __lowerCamelCase ( self ):
        '''Tiny model: exact scores/boxes for single and batched inputs.'''
        lowerCamelCase__ = pipeline(
            '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
        lowerCamelCase__ = object_detector(
            '''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(_A , decimals=4 ) , [
                {'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                {'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                {'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                {'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                {'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                {'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                {'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
                {'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
                {'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
            ] , )

        lowerCamelCase__ = object_detector(
            [
                {
                    '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
                    '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
                }
            ] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(_A , decimals=4 ) , [
                [
                    {'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                    {'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                    {'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                    {'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                    {'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                    {'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                    {'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
                    {'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
                    {'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
                ]
            ] , )

    @require_torch
    @slow
    def __lowerCamelCase ( self ):
        '''Default (large) model: exact detections for single and batched URLs.'''
        lowerCamelCase__ = pipeline('''zero-shot-object-detection''' )
        lowerCamelCase__ = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
        self.assertEqual(
            nested_simplify(_A , decimals=4 ) , [
                {'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                {'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
                {'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
                {'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
                {'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
            ] , )

        lowerCamelCase__ = object_detector(
            [
                {
                    '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                    '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
                },
                {
                    '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                    '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
                },
            ] , )
        self.assertEqual(
            nested_simplify(_A , decimals=4 ) , [
                [
                    {'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                    {'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
                    {'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
                    {'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
                    {'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
                ],
                [
                    {'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                    {'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
                    {'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
                    {'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
                    {'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
                ],
            ] , )

    @require_tf
    @unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def __lowerCamelCase ( self ):
        '''TF backend: intentionally skipped.'''
        pass

    @require_torch
    @slow
    def __lowerCamelCase ( self ):
        '''A higher threshold (0.2) must cut the detection list down to three.'''
        lowerCamelCase__ = 0.2
        lowerCamelCase__ = pipeline('''zero-shot-object-detection''' )
        lowerCamelCase__ = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' ,
            candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=_A , )
        self.assertEqual(
            nested_simplify(_A , decimals=4 ) , [
                {'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                {'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
                {'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
            ] , )

    @require_torch
    @slow
    def __lowerCamelCase ( self ):
        '''top_k=2 must keep only the two highest-scoring detections.'''
        lowerCamelCase__ = 2
        lowerCamelCase__ = pipeline('''zero-shot-object-detection''' )
        lowerCamelCase__ = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' ,
            candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=_A , )
        self.assertEqual(
            nested_simplify(_A , decimals=4 ) , [
                {'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                {'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
            ] , )
713
# This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests _a = open # noqa: we just need to have a builtin inside this module to test it properly
29
0
class __A : '''simple docstring''' def __init__( self , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = size lowerCamelCase__ = [0] * size lowerCamelCase__ = [0] * size @staticmethod def __lowerCamelCase ( __lowerCAmelCase ): '''simple docstring''' return index | (index + 1) @staticmethod def __lowerCamelCase ( __lowerCAmelCase ): '''simple docstring''' return (index & (index + 1)) - 1 def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = value while index < self.size: lowerCamelCase__ = self.get_prev(__UpperCamelCase ) + 1 if current_left_border == index: lowerCamelCase__ = value else: lowerCamelCase__ = max(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) lowerCamelCase__ = self.get_next(__UpperCamelCase ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' right -= 1 # Because of right is exclusive lowerCamelCase__ = 0 while left <= right: lowerCamelCase__ = self.get_prev(__UpperCamelCase ) if left <= current_left: lowerCamelCase__ = max(__UpperCamelCase , self.tree[right] ) lowerCamelCase__ = current_left else: lowerCamelCase__ = max(__UpperCamelCase , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
714
import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging


logger = logging.get_logger(__name__)
_a = logger  # backward-compatible alias for the previous obfuscated name


class ParallelBackendConfig:
    """Holds the joblib backend name selected via :func:`parallel_backend`.

    ``None`` (the default) means plain multiprocessing is used.
    Previously this class was named ``__A`` while being referenced as
    ``ParallelBackendConfig``, which could never resolve.
    """

    backend_name = None


# Backward-compatible alias for the previous obfuscated class name.
__A = ParallelBackendConfig


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Dispatch a parallel map either to a multiprocessing Pool or to joblib.

    The joblib path is taken only when a backend has been registered through
    :func:`parallel_backend`.  (The previous version defined all four functions
    under one shared obfuscated name, so the helpers called here did not exist.)
    """
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Split ``iterable`` into contiguous chunks and map over them with a Pool.

    Each worker receives one ``(function, sub-iterable, types, rank, disable_tqdm,
    desc)`` tuple; the per-process results are flattened before returning.
    """
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    # Hoisted out of the loop: chunk size and remainder are loop-invariant.
    div = len(iterable) // num_proc
    mod = len(iterable) % num_proc
    for index in range(num_proc):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        # Share tqdm's write lock with the workers so progress bars don't interleave.
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map over ``iterable`` with the registered joblib backend.

    tqdm progress is not forwarded here (each item is submitted with
    ``disable_tqdm=True``), because joblib controls its own scheduling.
    """
    # joblib is imported lazily so the module works without it installed.
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Context manager that selects the joblib backend used by :func:`parallel_map`.

    The backend name is cleared again on exit, even if the body raises.
    """
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called

    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
29
0
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr

from utils_ner import InputExample, Split, TokenClassificationTask


logger = logging.getLogger(__name__)
_a = logger  # backward-compatible alias for the previous obfuscated name


class NER(TokenClassificationTask):
    """CoNLL-style named-entity-recognition task: one ``word label`` pair per line.

    (The previous version named all three task classes ``__A`` — so the first two
    were shadowed — and ``__init__`` discarded ``label_idx`` into an unnamed local.)
    """

    def __init__(self, label_idx=-1):
        # Column index (from the right) of the label in each whitespace-split line.
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode):
        """Read ``{mode}.txt`` from ``data_dir`` into a list of ``InputExample``s."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    # Sentence boundary: flush the accumulated tokens, if any.
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                # Flush the trailing sentence when the file has no final blank line.
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        """Echo the test file to ``writer``, appending one prediction per token."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    # All predictions for this sentence consumed; move on.
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                # The model truncated this sentence, so there is no prediction left.
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path):
        """Read labels from ``path`` (one per line), or return the CoNLL-2003 default set."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    """CoNLL-2000 chunking task; labels live in the second-to-last column."""

    def __init__(self):
        # in NER datasets, the last column is usually reserved for NER label;
        # chunking labels are one column earlier.
        super().__init__(label_idx=-2)

    def get_labels(self, path):
        """Read labels from ``path``, or return the CoNLL-2000 chunk label set."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    """Part-of-speech tagging over CoNLL-U files (parsed with ``conllu``)."""

    def read_examples_from_file(self, data_dir, mode):
        """Read ``{mode}.txt`` (CoNLL-U) into ``InputExample``s of (form, upos) pairs."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        """Write one ``form (gold|pred)`` triple per token, one sentence per line."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path):
        """Read labels from ``path``, or return the universal POS tag set."""
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]


# Backward-compatible alias: ``__A`` previously ended up bound to the last class.
__A = POS
715
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of :class:`TransformerTemporalModel`.

    ``sample`` holds the denoised hidden states of shape
    ``(batch_frames, channels, height, width)``.
    """

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """Transformer that attends *across frames* of a video latent.

    The input ``(batch * frames, C, H, W)`` tensor is rearranged so that each
    spatial position becomes a sequence of length ``num_frames``, processed by
    standard transformer blocks, then rearranged back and added residually.

    (Previously both this class and its output dataclass were named ``__A``, so
    the ``TransformerTemporalModelOutput`` returned by ``forward`` did not exist,
    and ``__init__`` discarded its config values into unnamed locals.)
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        # 1. Normalize over channels, then 2. project into the transformer width.
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Apply temporal attention; returns ``TransformerTemporalModelOutput`` (or a tuple)."""
        # 1. Input: (batch * frames, C, H, W) -> (batch * H * W, frames, C)
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output: project back and undo the rearrangement.
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)


# Backward-compatible alias: ``__A`` previously ended up bound to the model class.
__A = TransformerTemporalModel
29
0
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _a : Any = { """configuration_informer""": [ """INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """InformerConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Optional[Any] = [ """INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """InformerForPrediction""", """InformerModel""", """InformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys _a : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
716
# Notebook-generation configuration for the Korean ("ko") documentation build.
# The Korean install snippet below is a runtime string consumed by the doc
# tooling and must stay as-is (do not translate).
_a = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
# NOTE(review): downstream tooling normally reads these under distinct names
# (INSTALL_CONTENT / notebook_first_cells / black_avoid_patterns); here every
# binding is `_a`, so later assignments shadow earlier ones, and
# `INSTALL_CONTENT` below is unresolved — verify against the original file.
_a = [{"type": "code", "content": INSTALL_CONTENT}]
# Placeholder substitutions applied when rendering doc code samples.
_a = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
29
0
# Lazily-importing package __init__ for the LLaMA model family.  Submodules are
# only imported when accessed, gated on the optional sentencepiece / tokenizers
# / torch dependencies.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Import structure consumed by `_LazyModule` below.
# NOTE(review): the dict is bound to `_a` but consumed as `_import_structure`,
# and the optional-dependency lists below are also all bound to `_a` instead of
# keys of the import structure — looks like automated-renaming damage; verify.
_a = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Slow (sentencepiece-based) tokenizer unavailable.
    pass
else:
    _a = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fast (Rust-based) tokenizer unavailable.
    pass
else:
    _a = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: modeling classes are not exposed.
    pass
else:
    _a = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime this branch is skipped.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel


else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    _a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
717
# Lazily-importing package __init__ for GPT-Neo.  PyTorch and Flax modeling
# submodules are only imported when accessed, gated on optional dependencies.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Import structure consumed by `_LazyModule` below.
# NOTE(review): the dict is bound to `_a` but consumed as `_import_structure`,
# and the dependency-gated lists below are also bound to `_a` — apparent
# automated-renaming damage; verify against the original file.
_a = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: PyTorch modeling classes are not exposed.
    pass
else:
    _a = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Flax missing: Flax modeling classes are not exposed.
    pass
else:
    _a = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime this branch is skipped.
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel


else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    _a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
29
0
class Graph:  # Public class to implement a graph
    """Count connected "islands" of 1s in a 0/1 grid using 8-directional DFS.

    Fixes over the previous version: ``__init__`` discarded its arguments into
    unnamed locals (so ``self.ROW``/``self.COL``/``self.graph`` never existed)
    and all three methods shared one name, leaving ``self.is_safe`` and
    ``self.diffs`` unresolved.
    """

    def __init__(self, row, col, graph):
        self.ROW = row      # number of rows in the grid
        self.COL = col      # number of columns in the grid
        self.graph = graph  # 2D list of 0/1 cells

    def is_safe(self, i, j, visited):
        """True if (i, j) is inside the grid, unvisited, and a land cell."""
        return 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j]

    def diffs(self, i, j, visited):
        """Depth-first flood fill marking every cell 8-connected to (i, j)."""
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self):  # And finally, count all islands.
        """Return the number of 8-connected components of 1s in the grid."""
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count


# Backward-compatible alias for the previous (obfuscated) class name.
__A = Graph
718
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


_a = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    """Deprecated alias of :class:`OwlViTImageProcessor`.

    Kept only for backward compatibility; warns on instantiation and otherwise
    delegates everything to the image processor.  (The previous version
    inherited from an undefined name ``lowerCAmelCase`` and dropped the warning
    category; the base class is the ``OwlViTImageProcessor`` named in the
    deprecation message and imported above.)
    """

    def __init__(self, *args, **kwargs):
        # Point users at the replacement class before delegating construction.
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


# Backward-compatible alias for the previous (obfuscated) class name.
__A = OwlViTFeatureExtractor
29
0
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    """Class-conditional image generation with a Diffusion Transformer (DiT).

    (The previous version inherited from an undefined name ``lowercase_`` and
    built the label dictionary into unnamed locals; the base class is the
    ``DiffusionPipeline`` imported above.)
    """

    def __init__(self, transformer, vae, scheduler, id2label=None):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                # Each value may list several comma-separated synonyms.
                for label in value.split(","):
                    self.labels[label.lstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label):
        """Map a label string (or list of strings) to ImageNet class ids.

        Raises ``ValueError`` for any label not present in ``self.labels``.
        """
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels,
        guidance_scale=4.0,
        generator=None,
        num_inference_steps=50,
        output_type="pil",
        return_dict=True,
    ):
        """Run the denoising loop with classifier-free guidance and decode images."""
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        # Duplicate latents for the conditional/unconditional halves when guiding.
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        # 1000 is the ImageNet "null" class used for the unconditional half.
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                # Keep both halves identical so guidance compares cond vs. uncond only.
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)


# Backward-compatible alias for the previous (obfuscated) class name.
__A = DiTPipeline
719
# Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Any: '''simple docstring''' lowerCamelCase__ = { '''en''': '''Machine learning is great, isn\'t it?''', '''ru''': '''Машинное обучение - это здорово, не так ли?''', '''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''', } # BLUE scores as follows: # "pair": [fairseq, transformers] lowerCamelCase__ = { '''wmt16-en-de-dist-12-1''': [2_8.3, 2_7.5_2], '''wmt16-en-de-dist-6-1''': [2_7.4, 2_7.1_1], '''wmt16-en-de-12-1''': [2_6.9, 2_5.7_5], } lowerCamelCase__ = F'{src_lang}-{tgt_lang}' lowerCamelCase__ = F'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training 
data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. 
Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n' model_card_dir.mkdir(parents=__snake_case ,exist_ok=__snake_case ) lowerCamelCase__ = os.path.join(__snake_case ,'''README.md''' ) print(F'Generating {path}' ) with open(__snake_case ,'''w''' ,encoding='''utf-8''' ) as f: f.write(__snake_case ) # make sure we are under the root of the project _a = Path(__file__).resolve().parent.parent.parent _a = repo_dir / "model_cards" for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: _a = model_cards_dir / "allenai" / model_name write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
29
0
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class __A ( unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = MODEL_FOR_CAUSAL_LM_MAPPING lowerCAmelCase_ = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output lowerCamelCase__ = text_generator('''This is a test''' , do_sample=__lowerCAmelCase ) self.assertEqual( __lowerCAmelCase , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) lowerCamelCase__ = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( __lowerCAmelCase , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. 
FiliFili@@''' ) } ], ] , ) lowerCamelCase__ = text_generator('''This is a test''' , do_sample=__lowerCAmelCase , num_return_sequences=2 , return_tensors=__lowerCAmelCase ) self.assertEqual( __lowerCAmelCase , [ {'''generated_token_ids''': ANY(__lowerCAmelCase )}, {'''generated_token_ids''': ANY(__lowerCAmelCase )}, ] , ) lowerCamelCase__ = text_generator.model.config.eos_token_id lowerCamelCase__ = '''<pad>''' lowerCamelCase__ = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=__lowerCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__lowerCAmelCase , ) self.assertEqual( __lowerCAmelCase , [ [ {'''generated_token_ids''': ANY(__lowerCAmelCase )}, {'''generated_token_ids''': ANY(__lowerCAmelCase )}, ], [ {'''generated_token_ids''': ANY(__lowerCAmelCase )}, {'''generated_token_ids''': ANY(__lowerCAmelCase )}, ], ] , ) @require_tf def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output lowerCamelCase__ = text_generator('''This is a test''' , do_sample=__lowerCAmelCase ) self.assertEqual( __lowerCAmelCase , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) lowerCamelCase__ = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__lowerCAmelCase ) self.assertEqual( __lowerCAmelCase , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = 
TextGenerationPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase ) return text_generator, ["This is a test", "Another test"] def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = '''Hello I believe in''' lowerCamelCase__ = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) lowerCamelCase__ = text_generator(__lowerCAmelCase ) self.assertEqual( __lowerCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , ) lowerCamelCase__ = text_generator(__lowerCAmelCase , stop_sequence=''' fe''' ) self.assertEqual(__lowerCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = text_generator.model lowerCamelCase__ = text_generator.tokenizer lowerCamelCase__ = text_generator('''This is a test''' ) self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ANY(__lowerCAmelCase )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCamelCase__ = text_generator('''This is a test''' , return_full_text=__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ANY(__lowerCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCamelCase__ = pipeline(task='''text-generation''' , model=__lowerCAmelCase , tokenizer=__lowerCAmelCase , return_full_text=__lowerCAmelCase ) lowerCamelCase__ = text_generator('''This is a test''' ) self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ANY(__lowerCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCamelCase__ = text_generator('''This is a test''' , return_full_text=__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ANY(__lowerCAmelCase )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCamelCase__ = 
text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__lowerCAmelCase ) self.assertEqual( __lowerCAmelCase , [ [{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}], [{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}], ] , ) if text_generator.tokenizer.pad_token is not None: lowerCamelCase__ = text_generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__lowerCAmelCase ) self.assertEqual( __lowerCAmelCase , [ [{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}], [{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}], ] , ) with self.assertRaises(__lowerCAmelCase ): lowerCamelCase__ = text_generator('''test''' , return_full_text=__lowerCAmelCase , return_text=__lowerCAmelCase ) with self.assertRaises(__lowerCAmelCase ): lowerCamelCase__ = text_generator('''test''' , return_full_text=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) with self.assertRaises(__lowerCAmelCase ): lowerCamelCase__ = text_generator('''test''' , return_text=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. 
if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowerCamelCase__ = text_generator('''''' ) self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ANY(__lowerCAmelCase )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowerCamelCase__ = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. lowerCamelCase__ = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 1_0_0_0_0 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 5_0_0 , max_new_tokens=2_0 ) lowerCamelCase__ = text_generator('''This is a test''' * 5_0_0 , handle_long_generation='''hole''' , max_new_tokens=2_0 ) # Hole strategy cannot work with self.assertRaises(__lowerCAmelCase ): text_generator( '''This is a test''' * 5_0_0 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 1_0 , ) @require_torch @require_accelerate @require_torch_gpu def __lowerCamelCase ( self ): '''simple docstring''' import torch # Classic `model_kwargs` lowerCamelCase__ = pipeline( model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCamelCase__ = pipe('''This is a test''' ) self.assertEqual( 
__lowerCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) lowerCamelCase__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCamelCase__ = pipe('''This is a test''' ) self.assertEqual( __lowerCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowerCamelCase__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) lowerCamelCase__ = pipe('''This is a test''' ) self.assertEqual( __lowerCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) @require_torch @require_torch_gpu def __lowerCamelCase ( self ): '''simple docstring''' import torch lowerCamelCase__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def __lowerCamelCase ( self ): '''simple docstring''' import torch lowerCamelCase__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa ) pipe('''This is a test''' , do_sample=__lowerCAmelCase , top_p=0.5 ) def __lowerCamelCase ( self ): '''simple docstring''' 
lowerCamelCase__ = '''Hello world''' lowerCamelCase__ = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": lowerCamelCase__ = logging.get_logger('''transformers.generation.tf_utils''' ) else: lowerCamelCase__ = logging.get_logger('''transformers.generation.utils''' ) lowerCamelCase__ = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(__lowerCAmelCase ) as cl: lowerCamelCase__ = text_generator(__lowerCAmelCase , max_length=1_0 , max_new_tokens=1 ) self.assertIn(__lowerCAmelCase , cl.out ) # The user only sets one -> no warning with CaptureLogger(__lowerCAmelCase ) as cl: lowerCamelCase__ = text_generator(__lowerCAmelCase , max_new_tokens=1 ) self.assertNotIn(__lowerCAmelCase , cl.out ) with CaptureLogger(__lowerCAmelCase ) as cl: lowerCamelCase__ = text_generator(__lowerCAmelCase , max_length=1_0 ) self.assertNotIn(__lowerCAmelCase , cl.out )
720
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


_a = logging.get_logger(__name__)


class __A(SegformerImageProcessor):
    """Deprecated alias for :class:`SegformerImageProcessor`.

    Kept only for backward compatibility; instantiating it emits a
    deprecation warning and otherwise behaves exactly like the image
    processor it wraps.
    """

    def __init__(self, *args, **kwargs):
        # Bug fix: the warning category used to be the (undefined/obfuscated)
        # varargs name, which is not a Warning subclass and raises TypeError;
        # FutureWarning is the conventional category for deprecation shims.
        # The base class was likewise the undefined name `lowerCAmelCase`;
        # per the import above and the message below it must be
        # SegformerImageProcessor.
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
29
0
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class __A(unittest.TestCase):
    """Multi-GPU launcher tests: each test spawns ``torchrun`` on a helper script.

    Bug fixes versus the previous version: all four test methods (and the
    setup) shared the single name ``__lowerCamelCase`` so only the last
    definition survived; the script paths were bound to throwaway locals
    while the tests read ``self.test_file_path`` etc.; and the launch
    command was passed via the undefined name ``_SCREAMING_SNAKE_CASE``.
    """

    def setUp(self):
        # Resolve the helper scripts shipped next to accelerate.test_utils.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        # Re-launches THIS file under torchrun; the worker body is the
        # `__main__` block below.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    # Worker body executed under torchrun by `test_pad_across_processes`.
    # Checks `pad_across_processes` both with the default (pad at the end)
    # and with `pad_first=True`. Previously every binding here was assigned
    # to `_a` while the checks read the undefined names `tensora`/`error_msg`,
    # so the worker crashed with NameError before testing anything.
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""

    padded = accelerator.pad_across_processes(tensor)
    if padded.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {padded.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(padded[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(padded[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    padded_first = accelerator.pad_across_processes(tensor, pad_first=True)
    if padded_first.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {padded_first.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(padded_first[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(padded_first[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise once at the end so all failures are reported together.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
721
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    """Relax all edges leaving `v` for one direction of a bidirectional Dijkstra.

    Bug fix: this helper used to share the obfuscated name `lowerCAmelCase__`
    with the main search function, so it was shadowed and the call below
    raised NameError. It now carries the name the call site already uses.

    Updates `cst_fwd` / `parent` / `queue` in place for the search direction
    whose settled set is `visited_forward`; whenever a neighbour is already
    settled by the opposite direction (`visited_backward`), the candidate
    forward-cost + edge + backward-cost path is used to tighten
    `shortest_distance`, which is returned.
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue  # already settled in this direction
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            # Both frontiers touch `nxt`: candidate full path through it.
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    """Bidirectional Dijkstra shortest-path distance from `source` to `destination`.

    `graph_forward` maps node -> [[neighbour, weight], ...] along edge
    direction; `graph_backward` is the same graph with edges reversed.
    Returns the shortest distance, 0 when source == destination, or -1 when
    no path exists.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1  # sentinel: no path found

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Standard bidirectional termination: once the two settled frontiers'
        # costs meet or exceed the best meeting path, it cannot improve.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


# Example graph (edge direction) and its reverse. Previously both dicts were
# assigned to the same name `_a`, so the forward graph was clobbered.
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
29
0
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = StableDiffusionXLImgaImgPipeline lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {"""latents"""} lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowerCAmelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS lowerCAmelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS def __lowerCamelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=__lowerCAmelCase , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , ) lowerCamelCase__ = EulerDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , ) torch.manual_seed(0 ) lowerCamelCase__ = AutoencoderKL( 
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) lowerCamelCase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=3_2 , ) lowerCamelCase__ = CLIPTextModel(__lowerCAmelCase ) lowerCamelCase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__lowerCAmelCase ) lowerCamelCase__ = CLIPTextModelWithProjection(__lowerCAmelCase ) lowerCamelCase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__lowerCAmelCase ) lowerCamelCase__ = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''text_encoder_2''': text_encoder_a, '''tokenizer_2''': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=0 ): '''simple docstring''' lowerCamelCase__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) lowerCamelCase__ = image / 2 + 0.5 if str(__lowerCAmelCase ).startswith('''mps''' ): lowerCamelCase__ = torch.manual_seed(__lowerCAmelCase ) else: lowerCamelCase__ = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) lowerCamelCase__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 5.0, '''output_type''': '''numpy''', '''strength''': 0.75, } return inputs def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = '''cpu''' # ensure 
determinism for the device-dependent torch.Generator lowerCamelCase__ = self.get_dummy_components() lowerCamelCase__ = StableDiffusionXLImgaImgPipeline(**__lowerCAmelCase ) lowerCamelCase__ = sd_pipe.to(__lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) lowerCamelCase__ = self.get_dummy_inputs(__lowerCAmelCase ) lowerCamelCase__ = sd_pipe(**__lowerCAmelCase ).images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) lowerCamelCase__ = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ): '''simple docstring''' super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def __lowerCamelCase ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.get_dummy_components() lowerCamelCase__ = StableDiffusionXLImgaImgPipeline(**__lowerCAmelCase ) lowerCamelCase__ = sd_pipe.to(__lowerCAmelCase ) lowerCamelCase__ = sd_pipe.to(__lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) # forward without prompt embeds lowerCamelCase__ = self.get_dummy_inputs(__lowerCAmelCase ) lowerCamelCase__ = 3 * ['''this is a negative prompt'''] lowerCamelCase__ = negative_prompt lowerCamelCase__ = 3 * [inputs['''prompt''']] lowerCamelCase__ = sd_pipe(**__lowerCAmelCase ) lowerCamelCase__ = output.images[0, -3:, -3:, -1] # forward with prompt embeds lowerCamelCase__ = self.get_dummy_inputs(__lowerCAmelCase ) lowerCamelCase__ = 3 * ['''this is a negative prompt'''] lowerCamelCase__ = 3 * [inputs.pop('''prompt''' )] ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = sd_pipe.encode_prompt(__lowerCAmelCase , negative_prompt=__lowerCAmelCase ) 
lowerCamelCase__ = sd_pipe( **__lowerCAmelCase , prompt_embeds=__lowerCAmelCase , negative_prompt_embeds=__lowerCAmelCase , pooled_prompt_embeds=__lowerCAmelCase , negative_pooled_prompt_embeds=__lowerCAmelCase , ) lowerCamelCase__ = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class __A ( unittest.TestCase ): '''simple docstring''' def __lowerCamelCase ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase="cpu" , __lowerCAmelCase=torch.floataa , __lowerCAmelCase=0 ): '''simple docstring''' lowerCamelCase__ = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) lowerCamelCase__ = np.random.RandomState(__lowerCAmelCase ).standard_normal((1, 4, 6_4, 6_4) ) lowerCamelCase__ = torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase , dtype=__lowerCAmelCase ) lowerCamelCase__ = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) lowerCamelCase__ = self.get_inputs(__lowerCAmelCase ) lowerCamelCase__ = pipe(**__lowerCAmelCase ).images lowerCamelCase__ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCamelCase__ = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
700
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class __A(ProcessorMixin):
    """CLAP processor: wraps a `ClapFeatureExtractor` and a Roberta tokenizer
    into a single object exposing text and/or audio preprocessing.

    Bug fixes versus the previous version: the base class was the undefined
    name `lowerCAmelCase`; the two ProcessorMixin class attributes were both
    assigned to `lowerCAmelCase_` (so the first was shadowed); the three
    public methods all shared one obfuscated name; and in the text+audio
    branch the audio features were bound to a dead local instead of being
    merged into the returned encoding.
    """

    # ProcessorMixin resolves these by name to instantiate the components.
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or extract features from `audios`.

        Raises ValueError when both are None. Returns the tokenizer encoding
        (with `input_features` merged in when audio is also given), or a
        `BatchEncoding` of audio features when only audio is given.
        """
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            # Merge the audio features into the text encoding (previously a
            # no-op assignment to a throwaway local).
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # De-duplicated union, preserving order (tokenizer names first).
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
29
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Submodule -> exported names; consumed by _LazyModule below. Bug fix: the
# dict and each later update used to be assigned to the throwaway name `_a`
# (each assignment clobbering the previous one) while _LazyModule read the
# undefined `_import_structure`, raising NameError at import time.
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch not installed: skip registering the PyTorch symbols
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # flax not installed: skip registering the Flax symbols
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they are lazy.
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    # Install the lazy proxy so heavy submodules are imported on first access.
    # Previously the proxy was assigned to `_a` and never registered.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
701
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class __A : '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , __lowerCAmelCase=0 , ): '''simple docstring''' lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = seq_length lowerCamelCase__ = is_training lowerCamelCase__ = use_input_mask lowerCamelCase__ = use_token_type_ids lowerCamelCase__ = use_labels lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = type_vocab_size lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = num_labels lowerCamelCase__ = 
num_choices lowerCamelCase__ = scope lowerCamelCase__ = projection_dim def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ = None if self.use_input_mask: # follow test_modeling_tf_ctrl.py lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ = None if self.use_token_type_ids: lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) lowerCamelCase__ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFDPRContextEncoder(config=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase 
) lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFDPRQuestionEncoder(config=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFDPRReader(config=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = config_and_inputs lowerCamelCase__ = {'''input_ids''': input_ids} return config, inputs_dict @require_tf class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) lowerCAmelCase_ = {"""feature-extraction""": TFDPRQuestionEncoder} if 
is_tf_available() else {} lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFDPRModelTester(self ) lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*__lowerCAmelCase ) @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRQuestionEncoder.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFDPRReader.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class __A ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = 
TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' ) lowerCamelCase__ = tf.constant( [[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP] lowerCamelCase__ = model(__lowerCAmelCase )[0] # embedding shape = (1, 768) # compare the actual values for a slice. lowerCamelCase__ = tf.constant( [ [ 0.0323_6253, 0.1275_3335, 0.1681_8509, 0.0027_9786, 0.389_6933, 0.2426_4945, 0.217_8971, -0.0233_5227, -0.0848_1959, -0.1432_4117, ] ] ) self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
29
0
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return every prime number <= ``num`` using the Sieve of Eratosthenes.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]

    Raises:
        ValueError: if ``num`` is not a positive integer.
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p, starting at p*p, as composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
702
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count case-insensitive occurrences of ``term`` among the words of ``document``.

    Punctuation is stripped and newlines are removed before splitting on spaces.
    """
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of newline-separated documents containing ``term``, total documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = log10(n / df) rounded to 3 decimals.

    With ``smoothing`` the formula is 1 + log10(n / (1 + df)), which tolerates df == 0.

    Raises:
        ValueError: if ``n`` is 0 (log10(0) is undefined).
        ZeroDivisionError: if ``df`` is 0 and smoothing is disabled.
    """
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Combine a term frequency and an inverse document frequency, rounded to 3 decimals."""
    return round(tf * idf, 3)
29
0
"""Convert a TensorFlow BERT checkpoint into a PyTorch state dict."""

import argparse

import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str) -> None:
    """Build a BertForPreTraining model from ``bert_config_file``, load the TF
    weights from ``tf_checkpoint_path`` into it, and save the state dict to
    ``pytorch_dump_path``.
    """
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
703
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Map of submodule name -> public symbols, consumed by _LazyModule below.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Eager imports so static type checkers see the real symbols.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
29
0
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    """A single element of the singly linked list."""

    data: int
    next_node: Node | None


class SortedLinkedList:
    """A singly linked list whose elements are kept in ascending order."""

    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert at the head in descending order so the final list is ascending.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        """Yield the stored values in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


SSL = SortedLinkedList  # short alias used by the demo below


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list.

    >>> merged = merge_lists(SSL(test_data_odd), SSL(test_data_even))
    >>> len(merged)
    16
    >>> str(merged)
    '-11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10'
    """
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
704
import os
from collections import namedtuple

import pytest

from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict


# Minimal stand-in for the CLI argument namespace accepted by TestCommand.
_TestCommandArgs = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
    ],
    defaults=[None, None, None, False, False, False, False, False],
)


def is_apercent_close(source, target):
    """Return True when ``source`` is within 1% of ``target``."""
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_dir):
    """Run the `datasets test` command on ``dataset_dir`` and check the README infos it writes."""
    args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()

    dataset_readme_path = os.path.join(dataset_dir, "README.md")
    assert os.path.exists(dataset_readme_path)

    dataset_infos = DatasetInfosDict.from_directory(dataset_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            # Byte counts can drift slightly between runs; allow 1% tolerance.
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
29
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Map of submodule name -> public symbols, consumed by _LazyModule below.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Eager imports so static type checkers see the real symbols.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
705
from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class __A : '''simple docstring''' def __init__( self , __lowerCAmelCase , ): '''simple docstring''' lowerCamelCase__ = parent lowerCamelCase__ = 1_3 lowerCamelCase__ = 7 lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = 9_9 lowerCamelCase__ = 3_2 lowerCamelCase__ = 2 lowerCamelCase__ = 4 lowerCamelCase__ = 3_7 lowerCamelCase__ = '''gelu''' lowerCamelCase__ = 0.1 lowerCamelCase__ = 0.1 lowerCamelCase__ = 5_1_2 lowerCamelCase__ = 1_6 lowerCamelCase__ = 2 lowerCamelCase__ = 0.02 lowerCamelCase__ = 3 lowerCamelCase__ = 4 lowerCamelCase__ = None def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ = None if self.use_input_mask: lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self ): '''simple docstring''' ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = self.prepare_config_and_inputs() lowerCamelCase__ = True lowerCamelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase ) lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ = [input_ids, input_mask] lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ): '''simple docstring''' lowerCamelCase__ = True lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase ) lowerCamelCase__ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''encoder_hidden_states''': encoder_hidden_states, 
'''encoder_attention_mask''': encoder_attention_mask, } lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ = [input_ids, input_mask] lowerCamelCase__ = model(__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase ) # Also check the case where encoder outputs are not passed lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFEsmForMaskedLM(config=__lowerCAmelCase ) lowerCamelCase__ = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.num_labels lowerCamelCase__ = TFEsmForTokenClassification(config=__lowerCAmelCase ) lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = config_and_inputs lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) 
lowerCAmelCase_ = ( { """feature-extraction""": TFEsmModel, """fill-mask""": TFEsmForMaskedLM, """text-classification""": TFEsmForSequenceClassification, """token-classification""": TFEsmForTokenClassification, """zero-shot""": TFEsmForSequenceClassification, } if is_tf_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFEsmModelTester(self ) lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFEsmModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @unittest.skip('''Protein models do not support embedding resizing.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''Protein models do not support embedding resizing.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(__lowerCAmelCase ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer lowerCamelCase__ = model.get_bias() assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) for k, v in name.items(): assert isinstance(__lowerCAmelCase , tf.Variable ) else: lowerCamelCase__ = model.get_output_embeddings() assert x is None lowerCamelCase__ = model.get_bias() assert name is None @require_tf class __A ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) lowerCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase__ = model(__lowerCAmelCase )[0] lowerCamelCase__ = [1, 6, 3_3] self.assertEqual(list(output.numpy().shape ) , __lowerCAmelCase ) # compare the actual values for a slice. lowerCamelCase__ = tf.constant( [ [ [8.92_1518, -10.58_9814, -6.467_1307], [-6.396_7156, -13.91_1377, -1.121_1915], [-7.78_1247, -13.95_1557, -3.74_0592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) lowerCamelCase__ = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] ) lowerCamelCase__ = model(__lowerCAmelCase )[0] # compare the actual values for a slice. lowerCamelCase__ = tf.constant( [ [ [0.1444_3092, 0.5412_5327, 0.324_7739], [0.3034_0484, 0.0052_6676, 0.3107_7722], [0.3227_8043, -0.2498_7096, 0.341_4628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
29
0
import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() _a = logging.get_logger("transformers.models.speecht5") _a = { "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm", "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection", "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv", "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed", } _a = { "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens", "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha", } _a = { "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0", "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1", "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer", "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha", "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer", } _a = { "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out", "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out", "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv", "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm", "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv", "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm", "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv", 
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm", "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv", "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm", "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv", "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm", } _a = { "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens", } _a = { "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head", } _a = { "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj", "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj", "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj", "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj", "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm", "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense", "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense", "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm", "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm", "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k", } _a = { "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj", "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj", "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj", "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj", 
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm", "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj", "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj", "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj", "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj", "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm", "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense", "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense", "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm", } _a = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } _a = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } _a = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } _a = [] _a = [ "encoder.version", "encoder.layers.*.norm_k.weight", "encoder.layers.*.norm_k.bias", "decoder.version", "decoder.layers.*.norm_k.weight", "decoder.layers.*.norm_k.bias", "decoder.pos_emb.pe_k", "speech_encoder_prenet.embed_positions._float_tensor", "text_decoder_prenet.embed_positions._float_tensor", ] _a = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "speech_decoder_prenet.*", "speech_decoder_postnet.*", ] _a = IGNORE_KEYS + [ "encoder.proj", "speech_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] _a = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", 
"text_decoder_prenet.*", "text_decoder_postnet.*", ] def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Optional[int]: '''simple docstring''' for attribute in key.split('''.''' ): lowerCamelCase__ = getattr(__snake_case ,__snake_case ) if weight_type is not None: lowerCamelCase__ = getattr(__snake_case ,__snake_case ).shape else: lowerCamelCase__ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": lowerCamelCase__ = value elif weight_type == "weight_g": lowerCamelCase__ = value elif weight_type == "weight_v": lowerCamelCase__ = value elif weight_type == "bias": lowerCamelCase__ = value elif weight_type == "running_mean": lowerCamelCase__ = value elif weight_type == "running_var": lowerCamelCase__ = value elif weight_type == "num_batches_tracked": lowerCamelCase__ = value else: lowerCamelCase__ = value logger.info(F'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' ) def lowerCAmelCase__(__snake_case ,__snake_case ) -> Optional[Any]: '''simple docstring''' for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." 
in key: lowerCamelCase__ , lowerCamelCase__ = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> List[str]: '''simple docstring''' lowerCamelCase__ = [] if task == "s2t": lowerCamelCase__ = hf_model.speechta.encoder.prenet.feature_encoder lowerCamelCase__ = MAPPING_S2T lowerCamelCase__ = IGNORE_KEYS_S2T elif task == "t2s": lowerCamelCase__ = None lowerCamelCase__ = MAPPING_T2S lowerCamelCase__ = IGNORE_KEYS_T2S elif task == "s2s": lowerCamelCase__ = hf_model.speechta.encoder.prenet.feature_encoder lowerCamelCase__ = MAPPING_S2S lowerCamelCase__ = IGNORE_KEYS_S2S else: raise ValueError(F'Unsupported task: {task}' ) for name, value in fairseq_dict.items(): if should_ignore(__snake_case ,__snake_case ): logger.info(F'{name} was ignored' ) continue lowerCamelCase__ = False if "conv_layers" in name: load_conv_layer( __snake_case ,__snake_case ,__snake_case ,__snake_case ,hf_model.config.feat_extract_norm == '''group''' ,) lowerCamelCase__ = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: lowerCamelCase__ , lowerCamelCase__ = key.split('''.*.''' ) if prefix in name and suffix in name: lowerCamelCase__ = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: lowerCamelCase__ = True if "*" in mapped_key: lowerCamelCase__ = name.split(__snake_case )[0].split('''.''' )[-2] lowerCamelCase__ = mapped_key.replace('''*''' ,__snake_case ) if "weight_g" in name: lowerCamelCase__ = '''weight_g''' elif "weight_v" in name: lowerCamelCase__ = '''weight_v''' elif "bias" in name: lowerCamelCase__ = '''bias''' elif "weight" in name: lowerCamelCase__ = '''weight''' elif "running_mean" in name: lowerCamelCase__ = '''running_mean''' elif "running_var" in name: lowerCamelCase__ = '''running_var''' elif "num_batches_tracked" in name: lowerCamelCase__ = '''num_batches_tracked''' else: lowerCamelCase__ = None set_recursively(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) continue if not is_used: unused_weights.append(__snake_case ) logger.warning(F'Unused weights: {unused_weights}' ) def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Tuple: '''simple docstring''' lowerCamelCase__ = full_name.split('''conv_layers.''' )[-1] lowerCamelCase__ = name.split('''.''' ) lowerCamelCase__ = int(items[0] ) lowerCamelCase__ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) lowerCamelCase__ = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' 
) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) lowerCamelCase__ = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) lowerCamelCase__ = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) lowerCamelCase__ = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(__snake_case ) @torch.no_grad() def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case=None ,__snake_case=None ,__snake_case=None ,) -> str: '''simple docstring''' if config_path is not None: lowerCamelCase__ = SpeechTaConfig.from_pretrained(__snake_case ) else: lowerCamelCase__ = SpeechTaConfig() if task == "s2t": lowerCamelCase__ = config.max_text_positions lowerCamelCase__ = SpeechTaForSpeechToText(__snake_case ) elif task == "t2s": lowerCamelCase__ = 1876 lowerCamelCase__ = 600 lowerCamelCase__ = config.max_speech_positions lowerCamelCase__ = SpeechTaForTextToSpeech(__snake_case ) elif task == "s2s": lowerCamelCase__ = 1876 lowerCamelCase__ = config.max_speech_positions lowerCamelCase__ = SpeechTaForSpeechToSpeech(__snake_case ) else: raise ValueError(F'Unknown task name: {task}' ) if vocab_path: lowerCamelCase__ = SpeechTaTokenizer(__snake_case ,model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it lowerCamelCase__ = AddedToken('''<mask>''' ,lstrip=__snake_case ,rstrip=__snake_case ) lowerCamelCase__ = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) lowerCamelCase__ = SpeechTaFeatureExtractor() lowerCamelCase__ = SpeechTaProcessor(tokenizer=__snake_case ,feature_extractor=__snake_case ) processor.save_pretrained(__snake_case ) lowerCamelCase__ = torch.load(__snake_case ) recursively_load_weights(fairseq_checkpoint['''model'''] ,__snake_case ,__snake_case ) model.save_pretrained(__snake_case ) if repo_id: print('''Pushing to the hub...''' ) processor.push_to_hub(__snake_case ) model.push_to_hub(__snake_case ) if __name__ == "__main__": _a = argparse.ArgumentParser() parser.add_argument( "--task", default="s2t", type=str, help="Type of the SpeechT5 model you'd like to convert. 
Should be one of 's2t', 't2s', 's2s'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) _a = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
706
from math import sqrt

# Restored module: the obfuscated source named every function `lowerCAmelCase__`
# while the bodies still called the real names (is_prime, gcd, ...), so every
# call raised NameError. Real names are restored below; `lowerCAmelCase__` is
# re-bound at the bottom to the last definition (fib), matching the original
# module's end state for any caller that used that name.


def is_prime(number: int) -> bool:
    """Return True iff *number* is prime (trial division up to sqrt)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int) -> list:
    """Return all primes in [2, n] via the sieve of Eratosthenes."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int) -> list:
    """Return all primes in [2, n] by testing each candidate with is_prime."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int) -> list:
    """Return the prime factorization of *number* as a list (with multiplicity)."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # floor division keeps 'quotient' an int (original used '/=',
                # which silently promoted it to float).
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of *number*."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of *number*."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    """Return True iff *number* is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True iff *number* is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0


def goldbach(number: int) -> list:
    """Return the first pair of primes summing to the even *number* > 2."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1: int, number2: int) -> int:
    """Return the greatest common divisor via Euclid's algorithm."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1


def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple (German 'kgV') of the two numbers."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n: int) -> int:
    """Return the n-th prime number (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return all primes strictly between the two given primes."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    """Return all positive divisors of *n* (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    """Return True iff *number* equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Return (numerator, denominator) reduced by their greatest common divisor."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Return n! computed iteratively."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Return the n-th Fibonacci number (fib(0) == fib(1) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans


# Backwards-compatible alias: the obfuscated original left `lowerCAmelCase__`
# bound to the last definition in the module.
lowerCAmelCase__ = fib
29
0
def lowerCAmelCase__(__snake_case ) -> str: '''simple docstring''' return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
707
from __future__ import annotations def lowerCAmelCase__(__snake_case ,__snake_case = None ,__snake_case = None ) -> None: '''simple docstring''' if start is None: lowerCamelCase__ = 0 if end is None: lowerCamelCase__ = len(__snake_case ) - 1 if start >= end: return lowerCamelCase__ = (start + end) // 2 slowsort(__snake_case ,__snake_case ,__snake_case ) slowsort(__snake_case ,mid + 1 ,__snake_case ) if sequence[end] < sequence[mid]: lowerCamelCase__ , lowerCamelCase__ = sequence[mid], sequence[end] slowsort(__snake_case ,__snake_case ,end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
29
0
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class __A ( lowerCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = DebertaTokenizer lowerCAmelCase_ = True lowerCAmelCase_ = DebertaTokenizerFast def __lowerCamelCase ( self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCamelCase__ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] lowerCamelCase__ = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) lowerCamelCase__ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCamelCase__ = {'''unk_token''': '''[UNK]'''} lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowerCAmelCase ) ) def __lowerCamelCase ( self , **__lowerCAmelCase ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = '''lower newer''' lowerCamelCase__ = '''lower newer''' return input_text, output_text def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = 
self.get_tokenizer() lowerCamelCase__ = '''lower newer''' lowerCamelCase__ = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] lowerCamelCase__ = tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = tokens + [tokenizer.unk_token] lowerCamelCase__ = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = tokenizer('''Hello''' , '''World''' ) lowerCamelCase__ = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , __lowerCAmelCase ) @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) lowerCamelCase__ = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase ) lowerCamelCase__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase ) lowerCamelCase__ = tokenizer.encode( '''sequence builders''' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase ) lowerCamelCase__ = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase ) lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase ) lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: lowerCamelCase__ = 
tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) lowerCamelCase__ = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] lowerCamelCase__ = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase ) lowerCamelCase__ = [tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) for seq in encoding['''input_ids''']] # fmt: off lowerCamelCase__ = { '''input_ids''': [ [1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on lowerCamelCase__ = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , __lowerCAmelCase ) for expected, decoded in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
708
from __future__ import annotations


def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    """Return the simple interest accrued on ``principal``.

    Interest = principal * rate * time (no compounding).

    Args:
        principal: Amount the interest is computed on; must be > 0.
        daily_interest_rate: Per-day interest rate; must be >= 0.
        days_between_payments: Number of days interest accrues; must be > 0.

    Raises:
        ValueError: If any argument is outside its valid range.
    """
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    """Return the compound interest earned (earnings only, excluding principal).

    Computes principal * ((1 + rate) ** periods - 1), i.e. the growth minus
    the original principal.

    Raises:
        ValueError: If any argument is outside its valid range.
    """
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    """Return the compound interest for an APR compounded daily over whole years.

    Delegates to :func:`compound_interest` with a per-day rate (APR / 365) and
    daily compounding periods (years * 365).

    Raises:
        ValueError: If any argument is outside its valid range.
    """
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    # Fix: the original defined all three helpers under one colliding name and
    # called an undefined `compound_interest`, raising NameError at runtime.
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
29
0
# Backward-compatibility shim: this module only re-exports the image-to-image
# pipeline and warns callers to migrate to the supported import path.
import warnings

# Re-exported for old scripts; unused here, hence the noqa marker.
# NOTE(review): the imported name spells "ImgaImg" while the warning text says
# "Img2Img" — looks like a mangled identifier; confirm against the diffusers API.
from diffusers import StableDiffusionImgaImgPipeline  # noqa F401

# Emitted once at import time so existing users see the deprecation notice.
warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
709
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator: make ``func`` return its wall-clock run time in seconds.

    The wrapped function's own return value is discarded; only the elapsed
    ``timeit.default_timer()`` delta is returned to the caller.
    """

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    # Fix: the original computed func.__name__ into a dead local; assign it to
    # the wrapper so benchmark reports keep the decorated function's name.
    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    """Create ``num_examples`` random (index, example) pairs matching ``features``.

    Args:
        features: A ``datasets.Features``-like mapping of column name -> feature type.
        num_examples: Number of examples to generate.
        seq_shapes: Mapping of column name -> shape for Sequence columns;
            required for every ``datasets.Sequence`` feature present.

    Returns:
        A list of ``(index, example_dict)`` tuples with random contents.
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # Unwrap nested Sequence features down to the innermost value type.
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write ``num_examples`` random examples to ``dataset_path`` and reload them.

    Raises:
        ValueError: If the writer reports a different number of examples than
            was requested.

    Returns:
        The on-disk ``datasets.Dataset`` backed by the freshly written file.
    """
    # Fix: the original named all three functions identically and then called
    # an undefined `generate_examples`, raising NameError at runtime.
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
29
0
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position _a = "2.13.1" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("3.7"): raise ImportWarning( "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition." ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n" "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`." ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as 
_deprecated_download_manager # isort:skip _a = concatenate_datasets _a = DownloadConfig _a = DownloadManager _a = DownloadMode _a = DownloadConfig _a = DownloadMode _a = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
710
def min_path_sum(grid: list) -> int:
    """Return the minimum path sum from top-left to bottom-right of ``grid``,
    moving only right or down.

    The grid is modified in place: each row is overwritten with running
    minimum path sums as the dynamic program proceeds.

    Raises:
        TypeError: If the grid or its first row is empty.
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    # First row: only rightward moves are possible, so take prefix sums.
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    # Fix: the original named both functions identically and called an
    # undefined `fill_row`, raising NameError at runtime.
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """Accumulate minimum path sums into ``current_row`` given the already
    completed ``row_above``, and return it (mutated in place)."""
    # Leftmost cell: the only predecessor is directly above.
    current_row[0] += row_above[0]
    # Each other cell: cheaper of coming from the left or from above.
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
29
0