| code (string, 82-54.1k chars) | code_codestyle (int64, 0-699) | style_context (string, 111-35.6k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'sew-d'
def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase=2 , __UpperCAmelCase=512 , __UpperCAmelCase=256 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=("p2c", "c2p") , __UpperCAmelCase="layer_norm" , __UpperCAmelCase="gelu_python" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __UpperCAmelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : Optional[int] = feat_extract_norm
lowerCAmelCase__ : str = feat_extract_activation
lowerCAmelCase__ : int = list(__UpperCAmelCase )
lowerCAmelCase__ : int = list(__UpperCAmelCase )
lowerCAmelCase__ : Any = list(__UpperCAmelCase )
lowerCAmelCase__ : int = conv_bias
lowerCAmelCase__ : List[Any] = num_conv_pos_embeddings
lowerCAmelCase__ : Optional[int] = num_conv_pos_embedding_groups
lowerCAmelCase__ : int = len(self.conv_dim )
lowerCAmelCase__ : Union[str, Any] = num_hidden_layers
lowerCAmelCase__ : Any = intermediate_size
lowerCAmelCase__ : int = squeeze_factor
lowerCAmelCase__ : int = max_position_embeddings
lowerCAmelCase__ : Any = position_buckets
lowerCAmelCase__ : Optional[int] = share_att_key
lowerCAmelCase__ : Tuple = relative_attention
lowerCAmelCase__ : Optional[int] = norm_rel_ebd
lowerCAmelCase__ : Tuple = list(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = hidden_act
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : Optional[int] = hidden_dropout
lowerCAmelCase__ : Union[str, Any] = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : List[Any] = feat_proj_dropout
lowerCAmelCase__ : Any = final_dropout
lowerCAmelCase__ : Optional[int] = layer_norm_eps
lowerCAmelCase__ : List[str] = feature_layer_norm_eps
lowerCAmelCase__ : Tuple = initializer_range
lowerCAmelCase__ : Tuple = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. '''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '''
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f""" = {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Tuple = apply_spec_augment
lowerCAmelCase__ : List[str] = mask_time_prob
lowerCAmelCase__ : int = mask_time_length
lowerCAmelCase__ : int = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : int = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
# ctc loss
lowerCAmelCase__ : Optional[Any] = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# sequence classification
lowerCAmelCase__ : Tuple = use_weighted_layer_sum
lowerCAmelCase__ : Dict = classifier_proj_size
@property
def __magic_name__( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
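# Sanity-check sketch: the property above reduces `conv_stride` to the feature
# extractor's total downsampling factor; with the default stride tuple from
# `__init__`, one output frame spans 5 * 2**6 == 320 input audio samples.
assert functools.reduce(operator.mul , (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , 1 ) == 320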
| 678 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class _lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=64 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ):
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : Tuple = batch_size
lowerCAmelCase__ : Union[str, Any] = seq_length
lowerCAmelCase__ : str = is_training
lowerCAmelCase__ : Union[str, Any] = use_input_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : int = use_labels
lowerCAmelCase__ : List[Any] = vocab_size
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : List[str] = embedding_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : Optional[int] = num_attention_heads
lowerCAmelCase__ : List[str] = intermediate_size
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = max_position_embeddings
lowerCAmelCase__ : List[Any] = type_vocab_size
lowerCAmelCase__ : Optional[Any] = type_sequence_label_size
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : Optional[Any] = num_labels
lowerCAmelCase__ : List[str] = num_choices
lowerCAmelCase__ : Any = scope
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : str = None
if self.use_input_mask:
lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : Optional[Any] = None
if self.use_token_type_ids:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Optional[int] = None
if self.use_labels:
lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__( self ):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = MegatronBertModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = MegatronBertForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = MegatronBertForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = MegatronBertForNextSentencePrediction(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = MegatronBertForPreTraining(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : int = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , next_sentence_label=__UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = MegatronBertForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.num_labels
lowerCAmelCase__ : Union[str, Any] = MegatronBertForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.num_labels
lowerCAmelCase__ : str = MegatronBertForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.num_choices
lowerCAmelCase__ : Dict = MegatronBertForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : Any = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) : Optional[int] = config_and_inputs
lowerCAmelCase__ : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _lowercase , _lowercase , unittest.TestCase ):
A__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
A__ = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ = True
# test_resize_embeddings = False
A__ = False
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
lowerCAmelCase__ : List[Any] = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class in get_values(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def __magic_name__( self ):
lowerCAmelCase__ : str = MegatronBertModelTester(self )
lowerCAmelCase__ : Dict = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def __magic_name__( self ):
self.config_tester.run_common_tests()
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__UpperCAmelCase )
def __lowerCAmelCase ( UpperCamelCase ) -> Optional[int]:
return torch.tensor(
UpperCamelCase , dtype=torch.long , device=UpperCamelCase , )
lowerCAmelCase_ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
def __magic_name__( self ):
lowerCAmelCase__ : int = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
lowerCAmelCase__ : Union[str, Any] = os.path.join(os.environ['''MYDIR'''] , __UpperCAmelCase )
lowerCAmelCase__ : Tuple = MegatronBertModel.from_pretrained(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.half()
lowerCAmelCase__ : Optional[int] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase )[0]
lowerCAmelCase__ : List[Any] = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , __UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
lowerCAmelCase__ : Union[str, Any] = output[0, ii, jj]
lowerCAmelCase__ : Optional[Any] = expected[3 * ii + jj]
lowerCAmelCase__ : List[str] = '''ii={} jj={} a={} b={}'''.format(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertTrue(math.isclose(__UpperCAmelCase , __UpperCAmelCase , rel_tol=__UpperCAmelCase , abs_tol=__UpperCAmelCase ) , msg=__UpperCAmelCase )
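# Note on the loop above: it checks the 3x3 top-left slice of the fp16 output
# against the flat `expected` list, using the module-level tolerance (1e-4) as
# both rel_tol and abs_tol; a vectorised torch.testing.assert_close over
# output[0, :3, :3] would be an equivalent (hypothetical) formulation.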
| 678 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
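# Usage note (sketch): with the _LazyModule installed above, importing the
# package stays cheap; e.g. `from transformers.models.git import GitModel`
# only materialises the torch-backed modeling module on first attribute access.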
| 678 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'bart'
A__ = ['past_key_values']
A__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , __UpperCAmelCase=5_0265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
lowerCAmelCase__ : Union[str, Any] = vocab_size
lowerCAmelCase__ : Optional[Any] = max_position_embeddings
lowerCAmelCase__ : int = d_model
lowerCAmelCase__ : str = encoder_ffn_dim
lowerCAmelCase__ : Any = encoder_layers
lowerCAmelCase__ : Dict = encoder_attention_heads
lowerCAmelCase__ : Union[str, Any] = decoder_ffn_dim
lowerCAmelCase__ : Union[str, Any] = decoder_layers
lowerCAmelCase__ : Any = decoder_attention_heads
lowerCAmelCase__ : Tuple = dropout
lowerCAmelCase__ : Any = attention_dropout
lowerCAmelCase__ : Any = activation_dropout
lowerCAmelCase__ : Optional[Any] = activation_function
lowerCAmelCase__ : Union[str, Any] = init_std
lowerCAmelCase__ : Union[str, Any] = encoder_layerdrop
lowerCAmelCase__ : int = decoder_layerdrop
lowerCAmelCase__ : Optional[int] = classifier_dropout
lowerCAmelCase__ : str = use_cache
lowerCAmelCase__ : int = encoder_layers
lowerCAmelCase__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ):
lowerCAmelCase__ : str = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
'''The config can simply be saved and uploaded again to be fixed.''' )
class _lowerCAmelCase ( _lowercase ):
@property
def __magic_name__( self ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCAmelCase__ : Any = {0: '''batch'''}
lowerCAmelCase__ : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowerCAmelCase__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''}
lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCAmelCase__ : List[str] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_layers
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCAmelCase__ : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
lowerCAmelCase__ : List[str] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __magic_name__( self ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Dict = super().outputs
else:
lowerCAmelCase__ : Any = super(__UpperCAmelCase , self ).outputs
if self.use_past:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.num_layers
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
lowerCAmelCase__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Generate decoder inputs
lowerCAmelCase__ : int = seq_length if not self.use_past else 1
lowerCAmelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
lowerCAmelCase__ : str = dict(**__UpperCAmelCase , **__UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = common_inputs['''input_ids'''].shape
lowerCAmelCase__ : List[str] = common_inputs['''decoder_input_ids'''].shape[1]
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.num_attention_heads
lowerCAmelCase__ : Any = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase__ : List[Any] = decoder_seq_length + 3
lowerCAmelCase__ : Any = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCAmelCase__ : Any = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 )
lowerCAmelCase__ : List[str] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.num_layers
lowerCAmelCase__ : Union[str, Any] = min(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Any = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers
lowerCAmelCase__ : Dict = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__UpperCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
) )
# TODO: test this.
lowerCAmelCase__ : Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__UpperCAmelCase , __UpperCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) )
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
lowerCAmelCase__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCAmelCase__ , lowerCAmelCase__ : Any = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowerCAmelCase__ : List[str] = seqlen + 2
lowerCAmelCase__ , lowerCAmelCase__ : int = self.num_layers
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_attention_heads
lowerCAmelCase__ : Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase__ : Optional[Any] = common_inputs['''attention_mask'''].dtype
lowerCAmelCase__ : List[Any] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
lowerCAmelCase__ : List[Any] = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase )
]
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase__ : Tuple = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase__ : Optional[Any] = tokenizer.num_special_tokens_to_add(__UpperCAmelCase )
lowerCAmelCase__ : int = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase__ : Optional[int] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCAmelCase__ : str = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) )
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
elif self.task == "causal-lm":
lowerCAmelCase__ : List[str] = self._generate_dummy_inputs_for_causal_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
else:
lowerCAmelCase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Any = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
lowerCAmelCase__ : Dict = super(__UpperCAmelCase , self )._flatten_past_key_values_(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
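# Shape sketch (assumes BART defaults, d_model=1024 with 16 decoder heads):
# each per-layer key/value tensor built above has shape
# (batch, num_heads, past_sequence_length, d_model // num_heads).
assert 1024 // 16 == 64  # per-head dimension used in decoder_shape above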
| 678 | 1 |
from scipy.stats import pearsonr
import datasets
lowerCAmelCase_ = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
lowerCAmelCase_ = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
lowerCAmelCase_ = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def __magic_name__( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
if return_pvalue:
lowerCAmelCase__ : Union[str, Any] = pearsonr(__UpperCAmelCase , __UpperCAmelCase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__UpperCAmelCase , __UpperCAmelCase )[0] )}
| 678 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'sew-d'
def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase=2 , __UpperCAmelCase=512 , __UpperCAmelCase=256 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=("p2c", "c2p") , __UpperCAmelCase="layer_norm" , __UpperCAmelCase="gelu_python" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __UpperCAmelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : Optional[int] = feat_extract_norm
lowerCAmelCase__ : str = feat_extract_activation
lowerCAmelCase__ : int = list(__UpperCAmelCase )
lowerCAmelCase__ : int = list(__UpperCAmelCase )
lowerCAmelCase__ : Any = list(__UpperCAmelCase )
lowerCAmelCase__ : int = conv_bias
lowerCAmelCase__ : List[Any] = num_conv_pos_embeddings
lowerCAmelCase__ : Optional[int] = num_conv_pos_embedding_groups
lowerCAmelCase__ : int = len(self.conv_dim )
lowerCAmelCase__ : Union[str, Any] = num_hidden_layers
lowerCAmelCase__ : Any = intermediate_size
lowerCAmelCase__ : int = squeeze_factor
lowerCAmelCase__ : int = max_position_embeddings
lowerCAmelCase__ : Any = position_buckets
lowerCAmelCase__ : Optional[int] = share_att_key
lowerCAmelCase__ : Tuple = relative_attention
lowerCAmelCase__ : Optional[int] = norm_rel_ebd
lowerCAmelCase__ : Tuple = list(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = hidden_act
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : Optional[int] = hidden_dropout
lowerCAmelCase__ : Union[str, Any] = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : List[Any] = feat_proj_dropout
lowerCAmelCase__ : Any = final_dropout
lowerCAmelCase__ : Optional[int] = layer_norm_eps
lowerCAmelCase__ : List[str] = feature_layer_norm_eps
lowerCAmelCase__ : Tuple = initializer_range
lowerCAmelCase__ : Tuple = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. '''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '''
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f""" = {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Tuple = apply_spec_augment
lowerCAmelCase__ : List[str] = mask_time_prob
lowerCAmelCase__ : int = mask_time_length
lowerCAmelCase__ : int = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : int = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
# ctc loss
lowerCAmelCase__ : Optional[Any] = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# sequence classification
lowerCAmelCase__ : Tuple = use_weighted_layer_sum
lowerCAmelCase__ : Dict = classifier_proj_size
@property
def __magic_name__( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 678 | 1 |
def __lowerCAmelCase ( UpperCamelCase ) -> str:
return "".join([hex(UpperCamelCase )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase )] )
def __lowerCAmelCase ( UpperCamelCase ) -> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(UpperCamelCase ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCamelCase ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
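# Round-trip note (original helper names assumed, since obfuscation gave both
# functions above one shared name): the pair satisfies identities such as
#   base16_decode(base16_encode(b"Hello!")) == b"Hello!"   # hex "48656C6C6F21"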
| 678 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( _lowercase , unittest.TestCase ):
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def __magic_name__( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ : Union[str, Any] = PegasusTokenizer(__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __magic_name__( self ):
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def __magic_name__( self , **__UpperCAmelCase ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase ):
return ("This is a test", "This is a test")
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = '''</s>'''
lowerCAmelCase__ : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(__UpperCAmelCase ) , 1103 )
def __magic_name__( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : int = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowerCAmelCase__ : Any = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
lowerCAmelCase__ : Dict = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCAmelCase__ : List[str] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowerCAmelCase__ : Tuple = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase__ : Tuple = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase__ : str = '''To ensure a smooth flow of bank resolutions.'''
lowerCAmelCase__ : int = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase__ : List[Any] = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
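# Reading of the asserts above: the first `offset` (103) ids are reserved for
# pad/eos/mask tokens, and a raw SentencePiece id i maps to token id
# i + offset, so the SentencePiece unk id 2 lands at 103 + 2 == 105.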
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = ['''This is going to be way too long.''' * 150, '''short example''']
lowerCAmelCase__ : List[str] = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
lowerCAmelCase__ : Optional[int] = self._large_tokenizer(
text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def __magic_name__( self ):
# fmt: off
lowerCAmelCase__ : Optional[int] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( _lowercase , unittest.TestCase ):
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def __magic_name__( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ : List[Any] = PegasusTokenizer(__UpperCAmelCase , offset=0 , mask_token_sent=__UpperCAmelCase , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __magic_name__( self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def __magic_name__( self , **__UpperCAmelCase ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase ):
return ("This is a test", "This is a test")
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : str = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowerCAmelCase__ : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
lowerCAmelCase__ : int = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = ['''This is going to be way too long.''' * 1000, '''short example''']
lowerCAmelCase__ : int = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
lowerCAmelCase__ : Tuple = self._large_tokenizer(
text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask.
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowerCAmelCase__ : Union[str, Any] = self._large_tokenizer(__UpperCAmelCase ).input_ids
self.assertListEqual(
__UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 678 | 1 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=4 , ):
lowerCAmelCase__ : Optional[Any] = parent
lowerCAmelCase__ : Union[str, Any] = batch_size
lowerCAmelCase__ : Any = seq_length
lowerCAmelCase__ : Union[str, Any] = is_training
lowerCAmelCase__ : Union[str, Any] = use_attention_mask
lowerCAmelCase__ : Union[str, Any] = use_token_type_ids
lowerCAmelCase__ : Any = use_labels
lowerCAmelCase__ : int = vocab_size
lowerCAmelCase__ : List[Any] = hidden_size
lowerCAmelCase__ : str = num_hidden_layers
lowerCAmelCase__ : str = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = intermediate_size
lowerCAmelCase__ : Any = hidden_act
lowerCAmelCase__ : List[Any] = hidden_dropout_prob
lowerCAmelCase__ : List[str] = attention_probs_dropout_prob
lowerCAmelCase__ : List[Any] = max_position_embeddings
lowerCAmelCase__ : Tuple = type_vocab_size
lowerCAmelCase__ : Tuple = type_sequence_label_size
lowerCAmelCase__ : Any = initializer_range
lowerCAmelCase__ : str = num_choices
def __magic_name__( self ):
lowerCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Optional[Any] = None
if self.use_attention_mask:
lowerCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : int = None
if self.use_token_type_ids:
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Optional[int] = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = config_and_inputs
lowerCAmelCase__ : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = config_and_inputs
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _lowerCAmelCase ( _lowercase , unittest.TestCase ):
A__ = True
A__ = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __magic_name__( self ):
lowerCAmelCase__ : int = FlaxRobertaPreLayerNormModelTester(self )
@slow
def __magic_name__( self ):
for model_class_name in self.all_model_classes:
lowerCAmelCase__ : Any = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=__UpperCAmelCase )
lowerCAmelCase__ : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCAmelCase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def __magic_name__( self ):
lowerCAmelCase__ : str = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
lowerCAmelCase__ : Any = model(__UpperCAmelCase )[0]
lowerCAmelCase__ : Union[str, Any] = [1, 11, 5_0265]
self.assertEqual(list(output.shape ) , __UpperCAmelCase )
# compare the actual values for a slice.
lowerCAmelCase__ : Optional[int] = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1e-4 ) )
@slow
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=__UpperCAmelCase )
lowerCAmelCase__ : int = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase )[0]
# compare the actual values for a slice.
lowerCAmelCase__ : Any = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1e-4 ) )
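# The integration tests above pin a small slice of the output logits to
# hard-coded reference values. A minimal sketch of that regression pattern in
# plain numpy (`run_model` is a hypothetical stand-in for any callable that
# returns the logits; this is not a transformers API):
import numpy as np

def logits_slice_matches(run_model, input_ids, expected_slice, atol=1e-4):
    # Compare the same top-left slice the tests check, within an absolute tolerance.
    output = run_model(input_ids)
    return np.allclose(output[:, :3, :3], expected_slice, atol=atol)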
| 678 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'donut-swin'
A__ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[3, 6, 12, 24] , __UpperCAmelCase=7 , __UpperCAmelCase=4.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = image_size
lowerCAmelCase__ : List[str] = patch_size
lowerCAmelCase__ : int = num_channels
lowerCAmelCase__ : Optional[Any] = embed_dim
lowerCAmelCase__ : int = depths
lowerCAmelCase__ : Dict = len(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = num_heads
lowerCAmelCase__ : Dict = window_size
lowerCAmelCase__ : str = mlp_ratio
lowerCAmelCase__ : Optional[int] = qkv_bias
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase__ : List[str] = drop_path_rate
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : List[str] = use_absolute_embeddings
lowerCAmelCase__ : Dict = layer_norm_eps
lowerCAmelCase__ : Any = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase__ : List[Any] = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
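# In Swin-style encoders the channel width doubles at each stage, which is why
# the hidden size after the final stage is embed_dim * 2 ** (num_stages - 1).
# A quick sanity check with the defaults above (embed_dim=96, four stages):
embed_dim, depths = 96, [2, 2, 6, 2]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 768  # 96 -> 192 -> 384 -> 768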
| 678 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = 1
lowerCAmelCase__ : Any = 3
lowerCAmelCase__ : Optional[int] = (32, 32)
lowerCAmelCase__ : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase )
return image
@property
def __magic_name__( self ):
torch.manual_seed(0 )
lowerCAmelCase__ : str = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def __magic_name__( self ):
torch.manual_seed(0 )
lowerCAmelCase__ : Tuple = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def __magic_name__( self ):
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : Union[str, Any] = self.dummy_cond_unet_upscale
lowerCAmelCase__ : Any = DDPMScheduler()
lowerCAmelCase__ : Any = DDIMScheduler(prediction_type='''v_prediction''' )
lowerCAmelCase__ : str = self.dummy_vae
lowerCAmelCase__ : Tuple = self.dummy_text_encoder
lowerCAmelCase__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCAmelCase__ : Tuple = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ : Any = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
# assemble the upscale pipeline from the dummy components defined above
lowerCAmelCase__ : int = StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
lowerCAmelCase__ : Dict = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ : Tuple = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ : List[str] = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
lowerCAmelCase__ : Dict = sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
lowerCAmelCase__ : List[str] = output.images
lowerCAmelCase__ : Union[str, Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
lowerCAmelCase__ : str = sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=__UpperCAmelCase , )[0]
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : str = image_from_tuple[0, -3:, -3:, -1]
lowerCAmelCase__ : Optional[Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
lowerCAmelCase__ : Optional[int] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__( self ):
lowerCAmelCase__ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : List[Any] = self.dummy_cond_unet_upscale
lowerCAmelCase__ : Optional[Any] = DDPMScheduler()
lowerCAmelCase__ : Dict = DDIMScheduler(prediction_type='''v_prediction''' )
lowerCAmelCase__ : Optional[int] = self.dummy_vae
lowerCAmelCase__ : Tuple = self.dummy_text_encoder
lowerCAmelCase__ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCAmelCase__ : str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ : List[Any] = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
# assemble the upscale pipeline from the dummy components defined above
lowerCAmelCase__ : Any = StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
lowerCAmelCase__ : List[str] = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ : Tuple = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
lowerCAmelCase__ : Optional[int] = output.images
assert image.shape[0] == 2
lowerCAmelCase__ : int = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
lowerCAmelCase__ : Dict = sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
lowerCAmelCase__ : Dict = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __magic_name__( self ):
lowerCAmelCase__ : Any = self.dummy_cond_unet_upscale
lowerCAmelCase__ : str = DDPMScheduler()
lowerCAmelCase__ : Dict = DDIMScheduler(prediction_type='''v_prediction''' )
lowerCAmelCase__ : Any = self.dummy_vae
lowerCAmelCase__ : Optional[Any] = self.dummy_text_encoder
lowerCAmelCase__ : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCAmelCase__ : Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ : Tuple = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
lowerCAmelCase__ : Any = unet.half()
lowerCAmelCase__ : int = text_encoder.half()
# assemble the upscale pipeline from the dummy components defined above
lowerCAmelCase__ : Any = StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
lowerCAmelCase__ : int = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ : Dict = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ : List[Any] = torch.manual_seed(0 )
lowerCAmelCase__ : str = sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type='''np''' , ).images
lowerCAmelCase__ : Union[str, Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
lowerCAmelCase__ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
lowerCAmelCase__ : Union[str, Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
lowerCAmelCase__ : Any = StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
lowerCAmelCase__ : int = '''a cat sitting on a park bench'''
lowerCAmelCase__ : List[Any] = torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''np''' , )
lowerCAmelCase__ : List[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def __magic_name__( self ):
lowerCAmelCase__ : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
lowerCAmelCase__ : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
lowerCAmelCase__ : List[str] = '''stabilityai/stable-diffusion-x4-upscaler'''
lowerCAmelCase__ : Dict = StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
lowerCAmelCase__ : List[Any] = '''a cat sitting on a park bench'''
lowerCAmelCase__ : str = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[Any] = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''np''' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def __magic_name__( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
lowerCAmelCase__ : Optional[Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
lowerCAmelCase__ : int = StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase__ : str = '''a cat sitting on a park bench'''
lowerCAmelCase__ : Tuple = torch.manual_seed(0 )
lowerCAmelCase__ : Any = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type='''np''' , )
lowerCAmelCase__ : str = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
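# The shape assertions above encode the x4 upscaler contract: the output image
# is exactly four times the low-resolution input in each spatial dimension. A
# small helper capturing that expectation for the `(batch, height, width, 3)`
# numpy layout the tests use:
def expected_upscaled_shape(low_res_size, batch_size=1, scale=4):
    width, height = low_res_size  # PIL's Image.size is (width, height)
    return (batch_size, height * scale, width * scale, 3)

assert expected_upscaled_shape((64, 64)) == (1, 256, 256, 3)
assert expected_upscaled_shape((128, 128)) == (1, 512, 512, 3)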
| 678 |
lowerCAmelCase_ = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of each unit relative to the base unit (metre)
lowerCAmelCase_ = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float:
lowerCAmelCase__ : List[str] = from_type.lower().strip('''s''' )
lowerCAmelCase__ : List[str] = to_type.lower().strip('''s''' )
lowerCAmelCase__ : str = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Any = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase )
if from_sanitized not in METRIC_CONVERSION:
lowerCAmelCase__ : Tuple = (
F"""Invalid 'from_type' value: {from_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}"""
)
raise ValueError(UpperCamelCase )
if to_sanitized not in METRIC_CONVERSION:
lowerCAmelCase__ : List[Any] = (
F"""Invalid 'to_type' value: {to_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}"""
)
raise ValueError(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[from_sanitized]
lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[to_sanitized]
lowerCAmelCase__ : int = 1
if from_exponent > to_exponent:
lowerCAmelCase__ : List[str] = from_exponent - to_exponent
else:
lowerCAmelCase__ : Dict = -(to_exponent - from_exponent)
return value * pow(10 , UpperCamelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
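# The conversion above reduces to value * 10 ** (from_exponent - to_exponent):
# e.g. kilometre (exponent 3) to metre (exponent 0) multiplies by 10 ** 3. A
# self-contained check of that arithmetic (the local `exponents`/`convert`
# names are illustrative, mirroring the table above):
exponents = {"m": 0, "km": 3, "Mm": 6}

def convert(value, from_unit, to_unit):
    return value * pow(10, exponents[from_unit] - exponents[to_unit])

assert convert(1, "km", "m") == 1000
assert convert(4, "Mm", "km") == 4000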
| 678 | 1 |
import sys
import turtle
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> tuple[float, float]:
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> None:
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
triangle(UpperCamelCase , get_mid(UpperCamelCase , UpperCamelCase ) , get_mid(UpperCamelCase , UpperCamelCase ) , depth - 1 )
triangle(UpperCamelCase , get_mid(UpperCamelCase , UpperCamelCase ) , get_mid(UpperCamelCase , UpperCamelCase ) , depth - 1 )
triangle(UpperCamelCase , get_mid(UpperCamelCase , UpperCamelCase ) , get_mid(UpperCamelCase , UpperCamelCase ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
lowerCAmelCase_ = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
lowerCAmelCase_ = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
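# Each call above draws one triangle and recurses into the three corner
# sub-triangles, so a run at depth d draws (3 ** (d + 1) - 1) / 2 triangles in
# total. A tiny check of that count, without any turtle drawing:
def triangles_drawn(depth):
    return 1 if depth == 0 else 1 + 3 * triangles_drawn(depth - 1)

assert triangles_drawn(0) == 1
assert triangles_drawn(3) == (3**4 - 1) // 2  # 40 triangles at depth 3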
| 678 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def __magic_name__( *__UpperCAmelCase , **__UpperCAmelCase ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : int = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : List[str] = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
# The scores are so close that floating-point error dominates, so the ordering is
# not guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(__UpperCAmelCase ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
] , )
@require_tf
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
] , )
@slow
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCAmelCase__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
lowerCAmelCase__ : Tuple = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCAmelCase__ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : Union[str, Any] = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
lowerCAmelCase__ : Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
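# The 0.333 scores above are not magic numbers: a randomly initialised tiny
# model produces (near-)identical logits for every candidate label, and the
# softmax of three equal logits is uniform. A quick numpy illustration:
import numpy as np

def softmax(logits):
    exps = np.exp(logits - logits.max())
    return exps / exps.sum()

assert np.allclose(softmax(np.zeros(3)), [1 / 3, 1 / 3, 1 / 3])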
| 678 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
"""configuration_trajectory_transformer""": [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TrajectoryTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrajectoryTransformerModel""",
"""TrajectoryTransformerPreTrainedModel""",
"""load_tf_weights_in_trajectory_transformer""",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
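# `_LazyModule` defers the torch-dependent imports until an attribute is first
# accessed. A minimal sketch of the same idea via module-level `__getattr__`
# (PEP 562); illustrative only, not the `_LazyModule` implementation:
import importlib

_LAZY_ATTRS = {"TrajectoryTransformerModel": ".modeling_trajectory_transformer"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")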
| 678 |
lowerCAmelCase_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
# Return True if the sink is reachable from the source in the residual graph.
lowerCAmelCase__ : Optional[int] = [False] * len(UpperCamelCase )
lowerCAmelCase__ : Tuple = [s]
lowerCAmelCase__ : Dict = True
while queue:
lowerCAmelCase__ : int = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : Optional[int] = u
return visited[t]
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
lowerCAmelCase__ : Any = [-1] * (len(UpperCamelCase ))
lowerCAmelCase__ : List[Any] = 0
lowerCAmelCase__ : Tuple = []
lowerCAmelCase__ : Optional[int] = [i[:] for i in graph] # Keep a copy of the original capacities.
while bfs(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : List[Any] = float('''Inf''' )
lowerCAmelCase__ : Dict = sink
while s != source:
# Find the minimum residual capacity along the selected path
lowerCAmelCase__ : Tuple = min(UpperCamelCase , graph[parent[s]][s] )
lowerCAmelCase__ : List[Any] = parent[s]
max_flow += path_flow
lowerCAmelCase__ : List[Any] = sink
while v != source:
lowerCAmelCase__ : Dict = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowerCAmelCase__ : Optional[Any] = parent[v]
for i in range(len(UpperCamelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
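# The routine above is Edmonds-Karp: BFS finds a shortest augmenting path, the
# bottleneck capacity is pushed along it, and edges that end up saturated
# (residual 0, original capacity > 0) are reported. An independent sketch that
# computes just the max-flow value for the same network; by the max-flow/
# min-cut theorem this equals the total capacity of the minimum cut, which is
# 23 for this classic CLRS example:
from collections import deque

def max_flow_value(capacity, source, sink):
    residual = [row[:] for row in capacity]
    flow = 0
    while True:
        # BFS for a shortest augmenting path in the residual graph.
        parent = {source: None}
        queue = deque([source])
        while queue and sink not in parent:
            u = queue.popleft()
            for v, cap in enumerate(residual[u]):
                if cap > 0 and v not in parent:
                    parent[v] = u
                    queue.append(v)
        if sink not in parent:
            return flow
        # Find the bottleneck along the path, then push that much flow.
        bottleneck, v = float("inf"), sink
        while parent[v] is not None:
            bottleneck = min(bottleneck, residual[parent[v]][v])
            v = parent[v]
        v = sink
        while parent[v] is not None:
            u = parent[v]
            residual[u][v] -= bottleneck
            residual[v][u] += bottleneck
            v = u
        flow += bottleneck

assert max_flow_value(test_graph, 0, 5) == 23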
| 678 | 1 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowerCAmelCase ( _lowercase ):
A__ = ''
A__ = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
super().__init__(self , **__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = repo_info
lowerCAmelCase__ : Any = token
lowerCAmelCase__ : Dict = None
def __magic_name__( self ):
if self.dir_cache is None:
lowerCAmelCase__ : Dict = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowerCAmelCase__ : Optional[Any] = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(__UpperCAmelCase ): {'''name''': str(__UpperCAmelCase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = "rb" , **__UpperCAmelCase , ):
if not isinstance(self.repo_info , __UpperCAmelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
lowerCAmelCase__ : str = hf_hub_url(self.repo_info.id , __UpperCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCAmelCase , mode=__UpperCAmelCase , headers=get_authentication_headers_for_url(__UpperCAmelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def __magic_name__( self , __UpperCAmelCase , **__UpperCAmelCase ):
self._get_dirs()
lowerCAmelCase__ : int = self._strip_protocol(__UpperCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase=False , **__UpperCAmelCase ):
self._get_dirs()
lowerCAmelCase__ : List[str] = PurePosixPath(path.strip('''/''' ) )
lowerCAmelCase__ : List[str] = {}
for p, f in self.dir_cache.items():
lowerCAmelCase__ : Dict = PurePosixPath(p.strip('''/''' ) )
lowerCAmelCase__ : Any = p.parent
if root == path:
lowerCAmelCase__ : Any = f
lowerCAmelCase__ : List[str] = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
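# `_get_dirs` above builds a flat path -> info cache: one "file" entry per
# sibling plus a "directory" entry for every ancestor directory of each file.
# A small standalone sketch of that cache construction (the filenames here
# are made up for illustration):
from pathlib import PurePosixPath

def build_dir_cache(filenames):
    cache = {}
    for name in filenames:
        cache[name] = {"name": name, "size": None, "type": "file"}
        for parent in list(PurePosixPath(name).parents)[:-1]:
            cache[str(parent)] = {"name": str(parent), "size": None, "type": "directory"}
    return cache

cache = build_dir_cache(["data/train/part-0.parquet", "README.md"])
assert cache["data"]["type"] == "directory"
assert cache["README.md"]["type"] == "file"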
| 678 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _lowerCAmelCase ( unittest.TestCase ):
A__ = MODEL_FOR_CAUSAL_LM_MAPPING
A__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
__UpperCAmelCase , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
] , )
lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id
lowerCAmelCase__ : List[Any] = '''<pad>'''
lowerCAmelCase__ : List[Any] = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , )
self.assertEqual(
__UpperCAmelCase , [
[
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
],
[
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
],
] , )
@require_tf
def __magic_name__( self ):
lowerCAmelCase__ : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Dict = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
return text_generator, ["This is a test", "Another test"]
def __magic_name__( self ):
lowerCAmelCase__ : Any = '''Hello I believe in'''
lowerCAmelCase__ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ : Optional[int] = text_generator(__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , stop_sequence=''' fe''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = text_generator.model
lowerCAmelCase__ : Optional[int] = text_generator.tokenizer
lowerCAmelCase__ : Tuple = text_generator('''This is a test''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase )
lowerCAmelCase__ : Dict = text_generator('''This is a test''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCAmelCase__ : List[str] = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
] , )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Any = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : str = text_generator('''test''' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCAmelCase__ : str = text_generator('''''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCAmelCase__ : List[str] = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, so we skip these tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCAmelCase__ : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__UpperCAmelCase ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __magic_name__( self ):
import torch
# Classic `model_kwargs`
lowerCAmelCase__ : List[str] = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ : Any = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ : Union[str, Any] = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCAmelCase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowerCAmelCase__ : Any = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def __magic_name__( self ):
import torch
lowerCAmelCase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def __magic_name__( self ):
import torch
lowerCAmelCase__ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=__UpperCAmelCase , top_p=0.5 )
def __magic_name__( self ):
lowerCAmelCase__ : int = '''Hello world'''
lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
lowerCAmelCase__ : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' )
else:
lowerCAmelCase__ : Dict = logging.get_logger('''transformers.generation.utils''' )
lowerCAmelCase__ : Optional[Any] = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(__UpperCAmelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : Any = text_generator(__UpperCAmelCase , max_new_tokens=1 )
self.assertNotIn(__UpperCAmelCase , cl.out )
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : Union[str, Any] = text_generator(__UpperCAmelCase , max_length=10 )
self.assertNotIn(__UpperCAmelCase , cl.out )
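# The warning test above exercises the length-argument precedence: a warning
# fires only when `max_length` and `max_new_tokens` are both given. A
# simplified sketch of that rule (the fallback default of 20 is an assumption
# here, not read from any model config; this is not the transformers
# implementation itself):
import warnings

def resolve_max_length(prompt_len, max_length=None, max_new_tokens=None):
    if max_length is not None and max_new_tokens is not None:
        warnings.warn("Both `max_new_tokens` and `max_length` are set; `max_new_tokens` takes precedence.")
    if max_new_tokens is not None:
        return prompt_len + max_new_tokens
    return max_length if max_length is not None else prompt_len + 20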
| 678 | 1 |
from math import isqrt, loga
def __lowerCAmelCase ( UpperCamelCase ) -> list[int]:
lowerCAmelCase__ : List[Any] = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : str = False
return [i for i in range(2 , UpperCamelCase ) if is_prime[i]]
def __lowerCAmelCase ( UpperCamelCase = 800800 , UpperCamelCase = 800800 ) -> int:
lowerCAmelCase__ : List[Any] = degree * loga(UpperCamelCase )
lowerCAmelCase__ : List[Any] = int(UpperCamelCase )
lowerCAmelCase__ : str = calculate_prime_numbers(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = 0
lowerCAmelCase__ : List[str] = 0
lowerCAmelCase__ : int = len(UpperCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"""{solution() = }""")
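# The sweep above counts pairs of distinct primes p < q with p**q * q**p <=
# bound, comparing in log space (q * log2(p) + p * log2(q)) to avoid enormous
# integers. A direct brute-force cross-check for a small bound; for 800 there
# are exactly two hybrid-integers, 2**3 * 3**2 = 72 and 2**5 * 5**2 = 800:
def brute_force_count(bound, primes):
    return sum(
        1
        for i, p in enumerate(primes)
        for q in primes[i + 1 :]
        if p**q * q**p <= bound
    )

assert brute_force_count(800, [2, 3, 5, 7, 11]) == 2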
| 678 |
def __lowerCAmelCase ( UpperCamelCase ) -> str:
return "".join([hex(UpperCamelCase )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase )] )
def __lowerCAmelCase ( UpperCamelCase ) -> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(UpperCamelCase ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCamelCase ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
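# The decoder pairs uppercase hex digits back into bytes, inverting the
# encoder's `hex(byte)[2:].zfill(2).upper()` mapping. A quick roundtrip in
# plain Python showing the same arithmetic:
data = b"Hello"
encoded = "".join(hex(byte)[2:].zfill(2).upper() for byte in data)
assert encoded == "48656C6C6F"
decoded = bytes(int(encoded[i : i + 2], 16) for i in range(0, len(encoded), 2))
assert decoded == data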
| 678 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'vit'
def __init__( self , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-12 , __UpperCAmelCase=224 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=16 , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Tuple = hidden_size
lowerCAmelCase__ : Optional[Any] = num_hidden_layers
lowerCAmelCase__ : int = num_attention_heads
lowerCAmelCase__ : Optional[int] = intermediate_size
lowerCAmelCase__ : Union[str, Any] = hidden_act
lowerCAmelCase__ : str = hidden_dropout_prob
lowerCAmelCase__ : int = attention_probs_dropout_prob
lowerCAmelCase__ : Tuple = initializer_range
lowerCAmelCase__ : Optional[int] = layer_norm_eps
lowerCAmelCase__ : Tuple = image_size
lowerCAmelCase__ : List[Any] = patch_size
lowerCAmelCase__ : List[str] = num_channels
lowerCAmelCase__ : List[Any] = qkv_bias
lowerCAmelCase__ : List[str] = encoder_stride
class _lowerCAmelCase ( _lowercase ):
A__ = version.parse('1.11' )
@property
def __magic_name__( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __magic_name__( self ):
return 1e-4
| 678 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _lowerCAmelCase ( _lowercase ):
A__ = (DPMSolverSDEScheduler,)
A__ = 10
def __magic_name__( self , **__UpperCAmelCase ):
lowerCAmelCase__ : Dict = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**__UpperCAmelCase )
return config
def __magic_name__( self ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase )
def __magic_name__( self ):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase )
def __magic_name__( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__UpperCAmelCase )
def __magic_name__( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = self.scheduler_classes[0]
lowerCAmelCase__ : str = self.get_scheduler_config()
lowerCAmelCase__ : Optional[Any] = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Union[str, Any] = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Optional[Any] = sample.to(__UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : int = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[str] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[str] = output.prev_sample
lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Optional[Any] = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Tuple = sample.to(__UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : int = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : int = output.prev_sample
lowerCAmelCase__ : Any = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase )
lowerCAmelCase__ : Dict = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = output.prev_sample
lowerCAmelCase__ : Dict = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(__UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
lowerCAmelCase__ : Dict = self.get_scheduler_config()
lowerCAmelCase__ : Optional[int] = scheduler_class(**__UpperCAmelCase , use_karras_sigmas=__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma
lowerCAmelCase__ : Union[str, Any] = sample.to(__UpperCAmelCase )
for t in scheduler.timesteps:
lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Dict = output.prev_sample
lowerCAmelCase__ : int = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
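# Every test above repeats the same denoising contract: scale the sample for
# the current timestep, predict with the model, then step the scheduler. A
# generic sketch of that loop (`model` and `scheduler` stand in for any
# diffusers-style pair, mirroring the dummy-model call signature used in the
# tests; this shows the usage pattern, not any particular scheduler's math):
def denoise(model, scheduler, sample, num_inference_steps):
    scheduler.set_timesteps(num_inference_steps)
    sample = sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        model_output = model(model_input, t)
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample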
| 678 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCAmelCase_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _lowerCAmelCase ( _lowercase ):
A__ = ['pixel_values']
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = size if size is not None else {'''shortest_edge''': 224}
lowerCAmelCase__ : str = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowerCAmelCase__ : Optional[int] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' )
lowerCAmelCase__ : Optional[Any] = do_resize
lowerCAmelCase__ : Tuple = size
lowerCAmelCase__ : Optional[Any] = resample
lowerCAmelCase__ : Dict = do_center_crop
lowerCAmelCase__ : Optional[Any] = crop_size
lowerCAmelCase__ : Union[str, Any] = do_rescale
lowerCAmelCase__ : Optional[Any] = rescale_factor
lowerCAmelCase__ : Union[str, Any] = do_normalize
lowerCAmelCase__ : Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCAmelCase__ : Optional[int] = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCAmelCase__ : Dict = do_convert_rgb
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ):
lowerCAmelCase__ : int = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
lowerCAmelCase__ : Union[str, Any] = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase )
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
lowerCAmelCase__ : Optional[int] = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
lowerCAmelCase__ : str = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ : str = size if size is not None else self.size
lowerCAmelCase__ : str = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase )
lowerCAmelCase__ : Dict = resample if resample is not None else self.resample
lowerCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ : Any = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ : Optional[Any] = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ : Tuple = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ : Optional[Any] = image_std if image_std is not None else self.image_std
lowerCAmelCase__ : Optional[int] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase__ : Union[str, Any] = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase__ : Dict = [convert_to_rgb(__UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase__ : Any = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
lowerCAmelCase__ : Optional[int] = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_center_crop:
lowerCAmelCase__ : Optional[Any] = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
if do_rescale:
lowerCAmelCase__ : Union[str, Any] = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_normalize:
lowerCAmelCase__ : Tuple = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
lowerCAmelCase__ : Dict = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
lowerCAmelCase__ : Union[str, Any] = {'''pixel_values''': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
| 678 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
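# Unit tests for the generation stopping criteria: criteria lists, max length, max new tokens,
# max wall-clock time, and criteria validation.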
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self , __UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = 3
lowerCAmelCase__ : Tuple = 250
lowerCAmelCase__ : List[Any] = ids_tensor((batch_size, length) , vocab_size )
lowerCAmelCase__ : Optional[Any] = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
return input_ids, scores
def __magic_name__( self ):
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 )
lowerCAmelCase__ : List[str] = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(9 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(10 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = MaxLengthCriteria(max_length=10 )
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self._get_tensors(5 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(9 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(10 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(5 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(9 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(10 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ : int = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def __magic_name__( self ):
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 )
lowerCAmelCase__ : Any = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ : int = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(__UpperCAmelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
lowerCAmelCase__ : List[str] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(__UpperCAmelCase ) , 1 )
| 678 | 1 |
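# Closest pair of points via divide and conquer; distances stay squared until the final square root.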
def __lowerCAmelCase ( pointa , pointb ) -> Dict:
return (pointa[0] - pointb[0]) ** 2 + (pointa[1] - pointb[1]) ** 2
def __lowerCAmelCase ( points , column=0 ) -> Any:
return sorted(points , key=lambda x : x[column] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=float('''inf''' ) ) -> Any:
for i in range(points_counts - 1 ):
for j in range(i + 1 , UpperCamelCase ):
lowerCAmelCase__ : List[Any] = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
lowerCAmelCase__ : List[Any] = current_dis
return min_dis
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=float('''inf''' ) ) -> Optional[Any]:
for i in range(min(6 , points_counts - 1 ) , UpperCamelCase ):
for j in range(max(0 , i - 6 ) , UpperCamelCase ):
lowerCAmelCase__ : Any = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
lowerCAmelCase__ : Dict = current_dis
return min_dis
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any:
# base case: brute-force the pairwise distances when there are at most three points
if points_counts <= 3:
return dis_between_closest_pair(UpperCamelCase , UpperCamelCase )
# recursion: split at the median x-coordinate, solve each half, then check the crossing strip
lowerCAmelCase__ : Any = points_counts // 2
lowerCAmelCase__ : Tuple = closest_pair_of_points_sqr(
UpperCamelCase , points_sorted_on_y[:mid] , UpperCamelCase )
lowerCAmelCase__ : int = closest_pair_of_points_sqr(
UpperCamelCase , points_sorted_on_y[mid:] , points_counts - mid )
lowerCAmelCase__ : Optional[int] = min(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : str = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(UpperCamelCase )
lowerCAmelCase__ : List[str] = dis_between_closest_in_strip(
UpperCamelCase , len(UpperCamelCase ) , UpperCamelCase )
return min(UpperCamelCase , UpperCamelCase )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> int:
lowerCAmelCase__ : str = column_based_sort(UpperCamelCase , column=0 )
lowerCAmelCase__ : List[Any] = column_based_sort(UpperCamelCase , column=1 )
return (
closest_pair_of_points_sqr(
UpperCamelCase , UpperCamelCase , UpperCamelCase )
) ** 0.5
if __name__ == "__main__":
lowerCAmelCase_ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("""Distance:""", closest_pair_of_points(points, len(points)))
| 678 |
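# Project Euler problem 8: find the greatest product of thirteen adjacent digits in the
# 1000-digit number below.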
from functools import reduce
lowerCAmelCase_ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def __lowerCAmelCase ( n = lowerCAmelCase_ ) -> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 678 | 1 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
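# Fine-tunes a pretrained code model on the CodeParrot codecomplex dataset to classify
# algorithmic complexity (7 classes), evaluating on the train set via a custom callback.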
def __lowerCAmelCase ( ) -> Dict:
lowerCAmelCase__ : List[str] = argparse.ArgumentParser()
parser.add_argument('''--model_ckpt''' , type=UpperCamelCase , default='''microsoft/unixcoder-base-nine''' )
parser.add_argument('''--num_epochs''' , type=UpperCamelCase , default=5 )
parser.add_argument('''--batch_size''' , type=UpperCamelCase , default=6 )
parser.add_argument('''--gradient_accumulation_steps''' , type=UpperCamelCase , default=1 )
parser.add_argument('''--freeze''' , type=bool , default=True )
parser.add_argument('''--learning_rate''' , type=UpperCamelCase , default=5E-4 )
parser.add_argument('''--seed''' , type=UpperCamelCase , default=0 )
parser.add_argument('''--lr_scheduler_type''' , type=UpperCamelCase , default='''cosine''' )
parser.add_argument('''--num_warmup_steps''' , type=UpperCamelCase , default=10 )
parser.add_argument('''--weight_decay''' , type=UpperCamelCase , default=0.01 )
parser.add_argument('''--output_dir''' , type=UpperCamelCase , default='''./results''' )
return parser.parse_args()
lowerCAmelCase_ = load("""accuracy""")
def __lowerCAmelCase ( UpperCamelCase ) -> List[str]:
lowerCAmelCase__ , lowerCAmelCase__ : Dict = eval_pred
lowerCAmelCase__ : Optional[Any] = np.argmax(UpperCamelCase , axis=1 )
return metric.compute(predictions=UpperCamelCase , references=UpperCamelCase )
class _lowerCAmelCase ( _lowercase ):
def __init__( self , __UpperCAmelCase ):
super().__init__()
lowerCAmelCase__ : List[Any] = trainer
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
if control.should_evaluate:
lowerCAmelCase__ : Optional[Any] = deepcopy(__UpperCAmelCase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
return control_copy
def __lowerCAmelCase ( ) -> List[Any]:
lowerCAmelCase__ : List[str] = get_args()
set_seed(args.seed )
lowerCAmelCase__ : List[str] = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
lowerCAmelCase__ : Optional[Any] = dataset.train_test_split(test_size=0.2 )
lowerCAmelCase__ : Union[str, Any] = train_test['''test'''].train_test_split(test_size=0.5 )
lowerCAmelCase__ : Union[str, Any] = DatasetDict(
{
'''train''': train_test['''train'''],
'''test''': test_validation['''train'''],
'''valid''': test_validation['''test'''],
} )
print('''Loading tokenizer and model''' )
lowerCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCAmelCase__ : Tuple = tokenizer.eos_token
lowerCAmelCase__ : Dict = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
lowerCAmelCase__ : Optional[Any] = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
lowerCAmelCase__ : Dict = False
lowerCAmelCase__ : Any = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
def tokenize(UpperCamelCase ):
lowerCAmelCase__ : Optional[int] = tokenizer(example['''src'''] , truncation=UpperCamelCase , max_length=1024 )
lowerCAmelCase__ : Tuple = labels.str2int(example['''complexity'''] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
lowerCAmelCase__ : List[str] = train_test_validation.map(
UpperCamelCase , batched=UpperCamelCase , remove_columns=train_test_validation['''train'''].column_names , )
lowerCAmelCase__ : str = DataCollatorWithPadding(tokenizer=UpperCamelCase )
lowerCAmelCase__ : Tuple = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
lowerCAmelCase__ : Optional[Any] = Trainer(
model=UpperCamelCase , args=UpperCamelCase , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=UpperCamelCase , data_collator=UpperCamelCase , compute_metrics=UpperCamelCase , )
print('''Training...''' )
trainer.add_callback(CustomCallback(UpperCamelCase ) )
trainer.train()
if __name__ == "__main__":
main()
| 678 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
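# Helpers for converting a PyTorch state dict into Flax parameters: rename keys, transpose
# conv/linear kernels, and check shapes against a randomly initialized Flax model.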
def __lowerCAmelCase ( UpperCamelCase ) -> List[str]:
lowerCAmelCase__ : int = R'''\w+[.]\d+'''
lowerCAmelCase__ : Tuple = re.findall(UpperCamelCase , UpperCamelCase )
for pat in pats:
lowerCAmelCase__ : List[str] = key.replace(UpperCamelCase , '''_'''.join(pat.split('''.''' ) ) )
return key
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
lowerCAmelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowerCAmelCase__ : List[str] = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowerCAmelCase__ : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
lowerCAmelCase__ : str = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCAmelCase__ : Any = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=42 ) -> Any:
# Step 1: Convert PyTorch tensors to numpy arrays
lowerCAmelCase__ : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowerCAmelCase__ : Tuple = flax_model.init_weights(PRNGKey(UpperCamelCase ) )
lowerCAmelCase__ : Any = flatten_dict(UpperCamelCase )
lowerCAmelCase__ : List[Any] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase__ : str = rename_key(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = rename_key_and_reshape_tensor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weights so that a warning is thrown
lowerCAmelCase__ : List[str] = jnp.asarray(UpperCamelCase )
return unflatten_dict(UpperCamelCase )
| 678 | 1 |
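# Project Euler problem 188: the last `digits` digits of the hyperexponentiation (tetration)
# of 1777 by 1855, computed with modular exponentiation by repeated squaring.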
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
lowerCAmelCase__ : List[str] = _modexpt(UpperCamelCase , exponent // 2 , UpperCamelCase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(UpperCamelCase , exponent - 1 , UpperCamelCase )) % modulo_value
def __lowerCAmelCase ( UpperCamelCase = 1777 , UpperCamelCase = 1855 , UpperCamelCase = 8 ) -> int:
lowerCAmelCase__ : Any = base
for _ in range(1 , UpperCamelCase ):
lowerCAmelCase__ : Tuple = _modexpt(UpperCamelCase , UpperCamelCase , 10**digits )
return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 678 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
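# Lazy import layout for Nezha: the configuration is always importable; the modeling classes
# are only exposed when torch is available.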
lowerCAmelCase_ = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 678 | 1 |
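# Reverses every word longer than four characters and leaves shorter words untouched.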
def __lowerCAmelCase ( UpperCamelCase ) -> str:
return " ".join(
''''''.join(word[::-1] ) if len(UpperCamelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 678 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowerCAmelCase_ = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
@classmethod
def __magic_name__( cls ):
lowerCAmelCase__ : Dict = TOKEN
HfFolder.save_token(__UpperCAmelCase )
@classmethod
def __magic_name__( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCAmelCase__ : List[Any] = FlaxBertModel(__UpperCAmelCase )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
lowerCAmelCase__ : List[Any] = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
lowerCAmelCase__ : Union[str, Any] = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ : List[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ : str = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__UpperCAmelCase , repo_id='''test-model-flax''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCAmelCase__ : Optional[Any] = FlaxBertModel(__UpperCAmelCase )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
lowerCAmelCase__ : str = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ : Dict = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__UpperCAmelCase , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" )
def __lowerCAmelCase ( modela , modelb ) -> Tuple:
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Any = flatten_dict(modela.params )
lowerCAmelCase__ : List[str] = flatten_dict(modelb.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1E-4:
lowerCAmelCase__ : Optional[Any] = False
return models_are_equal
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCAmelCase__ : List[str] = FlaxBertModel(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCAmelCase__ : Union[str, Any] = FlaxBertModel(__UpperCAmelCase )
lowerCAmelCase__ : Dict = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , max_shard_size='''10KB''' )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Any = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = '''bert'''
lowerCAmelCase__ : int = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Dict = FlaxBertModel.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = '''bert'''
lowerCAmelCase__ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
| 678 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
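# RWKV model configuration; the attribute map exposes `context_length` as `max_position_embeddings`.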
class _lowerCAmelCase ( _lowercase ):
A__ = 'rwkv'
A__ = {'max_position_embeddings': 'context_length'}
def __init__( self , __UpperCAmelCase=5_0277 , __UpperCAmelCase=1024 , __UpperCAmelCase=4096 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=6 , __UpperCAmelCase=False , __UpperCAmelCase=True , **__UpperCAmelCase , ):
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Union[str, Any] = context_length
lowerCAmelCase__ : str = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : Any = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowerCAmelCase__ : Union[str, Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowerCAmelCase__ : Optional[Any] = layer_norm_epsilon
lowerCAmelCase__ : List[str] = rescale_every
lowerCAmelCase__ : Any = use_cache
lowerCAmelCase__ : Tuple = bos_token_id
lowerCAmelCase__ : List[str] = eos_token_id
super().__init__(
tie_word_embeddings=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
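# Minimal usage sketch for reference. It assumes this obfuscated class corresponds to
# transformers' RwkvConfig/RwkvModel upstream, so it is kept as comments (hypothetical names):
# from transformers import RwkvConfig, RwkvModel
# config = RwkvConfig(context_length=2048)
# model = RwkvModel(config)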
| 678 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
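# Randomized in-place quicksort instrumented to count comparisons; the demo below sorts
# 100 samples drawn from a standard normal distribution.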
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any:
lowerCAmelCase__ : Optional[Any] = 0
if start < end:
lowerCAmelCase__ : Union[str, Any] = randint(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : List[str] = a[end]
lowerCAmelCase__ : List[Any] = a[pivot]
lowerCAmelCase__ : str = temp
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = _in_place_partition(UpperCamelCase , UpperCamelCase , UpperCamelCase )
count += _in_place_quick_sort(UpperCamelCase , UpperCamelCase , p - 1 )
count += _in_place_quick_sort(UpperCamelCase , p + 1 , UpperCamelCase )
return count
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
lowerCAmelCase__ : List[str] = 0
lowerCAmelCase__ : List[Any] = randint(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : str = a[end]
lowerCAmelCase__ : Optional[int] = a[pivot]
lowerCAmelCase__ : List[str] = temp
lowerCAmelCase__ : str = start - 1
for index in range(UpperCamelCase , UpperCamelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowerCAmelCase__ : List[str] = new_pivot_index + 1
lowerCAmelCase__ : int = a[new_pivot_index]
lowerCAmelCase__ : int = a[index]
lowerCAmelCase__ : Tuple = temp
lowerCAmelCase__ : Optional[Any] = a[new_pivot_index + 1]
lowerCAmelCase__ : List[str] = a[end]
lowerCAmelCase__ : Union[str, Any] = temp
return new_pivot_index + 1, count
lowerCAmelCase_ = TemporaryFile()
lowerCAmelCase_ = 1_00 # 100 elements are to be sorted
lowerCAmelCase_ , lowerCAmelCase_ = 0, 1 # mean and standard deviation
lowerCAmelCase_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
lowerCAmelCase_ = np.load(outfile)
lowerCAmelCase_ = len(M) - 1
lowerCAmelCase_ = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
| 678 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
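# PoolFormer configuration together with its ONNX export spec (input layout and validation tolerance).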
class _lowerCAmelCase ( _lowercase ):
A__ = 'poolformer'
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=16 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=4.0 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[64, 128, 320, 512] , __UpperCAmelCase=[7, 3, 3, 3] , __UpperCAmelCase=[4, 2, 2, 2] , __UpperCAmelCase=[2, 1, 1, 1] , __UpperCAmelCase=4 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=True , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , **__UpperCAmelCase , ):
lowerCAmelCase__ : List[Any] = num_channels
lowerCAmelCase__ : Any = patch_size
lowerCAmelCase__ : str = stride
lowerCAmelCase__ : Any = padding
lowerCAmelCase__ : Dict = pool_size
lowerCAmelCase__ : List[str] = hidden_sizes
lowerCAmelCase__ : Any = mlp_ratio
lowerCAmelCase__ : List[str] = depths
lowerCAmelCase__ : Tuple = patch_sizes
lowerCAmelCase__ : List[Any] = strides
lowerCAmelCase__ : int = num_encoder_blocks
lowerCAmelCase__ : Dict = drop_path_rate
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : Dict = use_layer_scale
lowerCAmelCase__ : Any = layer_scale_init_value
lowerCAmelCase__ : Optional[Any] = initializer_range
super().__init__(**__UpperCAmelCase )
class _lowerCAmelCase ( _lowercase ):
A__ = version.parse('1.11' )
@property
def __magic_name__( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __magic_name__( self ):
return 2e-3
| 678 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
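# Round-trip tests for the Parquet reader/writer: single datasets and DatasetDicts with various
# feature schemas, splits, path types, and feature-dependent row-group sizing.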
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]:
assert isinstance(UpperCamelCase , UpperCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCAmelCase__ : List[str] = tmp_path / '''cache'''
lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase__ : List[Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCAmelCase__ : str = tmp_path / '''cache'''
lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : str = features.copy() if features else default_expected_features
lowerCAmelCase__ : List[Any] = (
Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
lowerCAmelCase__ : str = tmp_path / '''cache'''
lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , split=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
if issubclass(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : Any = parquet_path
elif issubclass(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : Any = [parquet_path]
lowerCAmelCase__ : int = tmp_path / '''cache'''
lowerCAmelCase__ : str = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=("train",) ) -> str:
assert isinstance(UpperCamelCase , UpperCamelCase )
for split in splits:
lowerCAmelCase__ : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]:
lowerCAmelCase__ : Any = tmp_path / '''cache'''
lowerCAmelCase__ : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase__ : Optional[Any] = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read()
_check_parquet_datasetdict(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
lowerCAmelCase__ : Any = tmp_path / '''cache'''
lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : Tuple = features.copy() if features else default_expected_features
lowerCAmelCase__ : Optional[int] = (
Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase__ : List[str] = ParquetDatasetReader({'''train''': parquet_path} , features=UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_datasetdict(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
if split:
lowerCAmelCase__ : Tuple = {split: parquet_path}
else:
lowerCAmelCase__ : int = '''train'''
lowerCAmelCase__ : List[Any] = {'''train''': parquet_path, '''test''': parquet_path}
lowerCAmelCase__ : Optional[int] = tmp_path / '''cache'''
lowerCAmelCase__ : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : List[str] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_datasetdict(UpperCamelCase , UpperCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCAmelCase__ : Optional[Any] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
lowerCAmelCase__ : Union[str, Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' )
lowerCAmelCase__ : int = pf.read()
assert dataset.data.table == output_table
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCAmelCase__ : List[str] = str(shared_datadir / '''test_image_rgb.jpg''' )
lowerCAmelCase__ : Dict = {'''image''': [image_path]}
lowerCAmelCase__ : int = Features({'''image''': Image()} )
lowerCAmelCase__ : Dict = Dataset.from_dict(UpperCamelCase , features=UpperCamelCase )
lowerCAmelCase__ : List[str] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
lowerCAmelCase__ : Dict = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
lowerCAmelCase__ : int = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=UpperCamelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Any:
assert get_writer_batch_size(UpperCamelCase ) == expected
| 678 | 1 |
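# Conditional exports for the UnCLIP pipelines: fall back to dummy objects unless torch and
# transformers>=4.25.0 are available.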
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 678 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
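# FocalNet backbone configuration; stage names are derived from `depths` and aligned with the
# requested out_features/out_indices.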
class _lowerCAmelCase ( _lowercase , _lowercase ):
A__ = 'focalnet'
def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=False , __UpperCAmelCase=[192, 384, 768, 768] , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[2, 2, 2, 2] , __UpperCAmelCase=[3, 3, 3, 3] , __UpperCAmelCase="gelu" , __UpperCAmelCase=4.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=False , __UpperCAmelCase=1e-4 , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Any = image_size
lowerCAmelCase__ : Any = patch_size
lowerCAmelCase__ : Tuple = num_channels
lowerCAmelCase__ : Dict = embed_dim
lowerCAmelCase__ : Optional[int] = use_conv_embed
lowerCAmelCase__ : Optional[int] = hidden_sizes
lowerCAmelCase__ : Optional[Any] = depths
lowerCAmelCase__ : Dict = focal_levels
lowerCAmelCase__ : int = focal_windows
lowerCAmelCase__ : Optional[Any] = hidden_act
lowerCAmelCase__ : Optional[int] = mlp_ratio
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : List[Any] = drop_path_rate
lowerCAmelCase__ : Tuple = use_layerscale
lowerCAmelCase__ : List[Any] = layerscale_value
lowerCAmelCase__ : Dict = use_post_layernorm
lowerCAmelCase__ : Dict = use_post_layernorm_in_modulation
lowerCAmelCase__ : Dict = normalize_modulator
lowerCAmelCase__ : Union[str, Any] = initializer_range
lowerCAmelCase__ : List[str] = layer_norm_eps
lowerCAmelCase__ : Tuple = encoder_stride
lowerCAmelCase__ : Dict = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
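# Minimal usage sketch for reference. It assumes this obfuscated class corresponds to
# transformers' FocalNetConfig/FocalNetModel upstream, so it is kept as comments (hypothetical names):
# from transformers import FocalNetConfig, FocalNetModel
# config = FocalNetConfig(image_size=224, embed_dim=96)
# model = FocalNetModel(config)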
| 678 | 1 |
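# Basic binary tree utilities: in-order display, depth, and a full-binary-tree check
# (every node has exactly zero or two children).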
from __future__ import annotations
class _lowerCAmelCase :
def __init__( self , __UpperCAmelCase ):
lowerCAmelCase__ : int = data
lowerCAmelCase__ : Node | None = None
lowerCAmelCase__ : Node | None = None
def __lowerCAmelCase ( UpperCamelCase ) -> None: # In Order traversal of the tree
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def __lowerCAmelCase ( UpperCamelCase ) -> int:
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def __lowerCAmelCase ( UpperCamelCase ) -> bool:
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def __lowerCAmelCase ( ) -> None: # Main function for testing.
lowerCAmelCase__ : Union[str, Any] = Node(1 )
lowerCAmelCase__ : Optional[int] = Node(2 )
lowerCAmelCase__ : int = Node(3 )
lowerCAmelCase__ : Optional[Any] = Node(4 )
lowerCAmelCase__ : Any = Node(5 )
lowerCAmelCase__ : Union[str, Any] = Node(6 )
lowerCAmelCase__ : Any = Node(7 )
lowerCAmelCase__ : Optional[Any] = Node(8 )
lowerCAmelCase__ : Optional[Any] = Node(9 )
print(is_full_binary_tree(UpperCamelCase ) )
print(depth_of_tree(UpperCamelCase ) )
print('''Tree is: ''' )
display(UpperCamelCase )
if __name__ == "__main__":
main()
| 678 |
from scipy.stats import pearsonr
import datasets
lowerCAmelCase_ = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
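Formally, for paired samples x and y, the coefficient is computed as r = cov(x, y) / (std(x) * std(y)).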
"""
lowerCAmelCase_ = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
lowerCAmelCase_ = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def __magic_name__( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
if return_pvalue:
lowerCAmelCase__ : Union[str, Any] = pearsonr(__UpperCAmelCase , __UpperCAmelCase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__UpperCAmelCase , __UpperCAmelCase )[0] )}
| 678 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
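# Pipeline tests for AltDiffusion: fast CPU checks with tiny random components, and slow GPU
# checks against pretrained BAAI/AltDiffusion checkpoints.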
class _lowerCAmelCase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
A__ = AltDiffusionPipeline
A__ = TEXT_TO_IMAGE_PARAMS
A__ = TEXT_TO_IMAGE_BATCH_PARAMS
A__ = TEXT_TO_IMAGE_IMAGE_PARAMS
A__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def __magic_name__( self ):
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowerCAmelCase__ : Dict = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , )
torch.manual_seed(0 )
lowerCAmelCase__ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
lowerCAmelCase__ : Tuple = CLIPTextModel(__UpperCAmelCase )
lowerCAmelCase__ : int = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCAmelCase__ : List[str] = 77
lowerCAmelCase__ : List[Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
if str(__UpperCAmelCase ).startswith('''mps''' ):
lowerCAmelCase__ : List[str] = torch.manual_seed(__UpperCAmelCase )
else:
lowerCAmelCase__ : Any = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def __magic_name__( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : Dict = self.get_dummy_components()
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ : Union[str, Any] = RobertaSeriesModelWithTransformation(__UpperCAmelCase )
lowerCAmelCase__ : Tuple = text_encoder
lowerCAmelCase__ : str = AltDiffusionPipeline(**__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = alt_pipe.to(__UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ : str = self.get_dummy_inputs(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = '''A photo of an astronaut'''
lowerCAmelCase__ : List[Any] = alt_pipe(**__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = output.images
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Tuple = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : str = self.get_dummy_components()
lowerCAmelCase__ : Optional[Any] = PNDMScheduler(skip_prk_steps=__UpperCAmelCase )
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ : Union[str, Any] = RobertaSeriesModelWithTransformation(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = text_encoder
lowerCAmelCase__ : Dict = AltDiffusionPipeline(**__UpperCAmelCase )
lowerCAmelCase__ : str = alt_pipe.to(__UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = self.get_dummy_inputs(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = alt_pipe(**__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = output.images
lowerCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Tuple = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self ):
# make sure here that the PNDM scheduler skips PRK steps
lowerCAmelCase__ : Any = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=__UpperCAmelCase )
lowerCAmelCase__ : Tuple = alt_pipe.to(__UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
lowerCAmelCase__ : str = alt_pipe([prompt] , generator=__UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
lowerCAmelCase__ : str = output.images
lowerCAmelCase__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase__ : Union[str, Any] = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__( self ):
lowerCAmelCase__ : Any = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
lowerCAmelCase__ : Union[str, Any] = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = alt_pipe.to(__UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ : str = torch.manual_seed(0 )
lowerCAmelCase__ : str = alt_pipe([prompt] , generator=__UpperCAmelCase , num_inference_steps=2 , output_type='''numpy''' )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase__ : Union[str, Any] = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 678 |
from manim import *
class _lowerCAmelCase ( _lowercase ):
def __magic_name__( self ):
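# Lay out labelled groups of rectangles representing CPU, GPU, model and checkpoint memory slots.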
lowerCAmelCase__ : Tuple = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase__ : Dict = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : List[str] = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : int = Text('''CPU''' , font_size=24 )
lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = [mem.copy() for i in range(4 )]
lowerCAmelCase__ : Tuple = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Tuple = Text('''GPU''' , font_size=24 )
lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Tuple = Text('''Model''' , font_size=24 )
lowerCAmelCase__ : List[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : Optional[Any] = []
for i, rect in enumerate(__UpperCAmelCase ):
rect.set_stroke(__UpperCAmelCase )
lowerCAmelCase__ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
self.add(__UpperCAmelCase )
model_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase )
lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Any = Text('''Loaded Checkpoint''' , font_size=24 )
lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : str = []
for i, rect in enumerate(__UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
target.move_to(__UpperCAmelCase )
ckpt_arr.append(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase__ : List[Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[str] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : str = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
lowerCAmelCase__ : Optional[Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase__ : Dict = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : str = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : List[str] = Text('''Disk''' , font_size=24 )
lowerCAmelCase__ : Any = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
lowerCAmelCase__ : str = []
for i, rect in enumerate(__UpperCAmelCase ):
lowerCAmelCase__ : Dict = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
self.play(*__UpperCAmelCase )
self.play(FadeOut(__UpperCAmelCase ) )
lowerCAmelCase__ : int = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) )
self.play(
FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , )
self.wait()
| 678 | 1 |
import pytest
lowerCAmelCase_ = """__dummy_dataset1__"""
lowerCAmelCase_ = """
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def __lowerCAmelCase ( ) -> Any:
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __lowerCAmelCase ( ) -> Tuple:
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCAmelCase__ : Optional[Any] = dataset_loading_script_name
lowerCAmelCase__ : Dict = tmp_path / '''datasets''' / script_name
script_dir.mkdir(parents=UpperCamelCase )
lowerCAmelCase__ : Dict = script_dir / F"""{script_name}.py"""
with open(UpperCamelCase , '''w''' ) as f:
f.write(UpperCamelCase )
return str(UpperCamelCase )
| 678 |
import collections
import os
import re
from pathlib import Path
lowerCAmelCase_ = """src/transformers"""
# Matches is_xxx_available()
lowerCAmelCase_ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase_ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase_ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCAmelCase_ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase_ = re.compile(R"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase_ = re.compile(R"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCAmelCase_ = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowerCAmelCase_ = re.compile(R"""^\s*else:""")
def __lowerCAmelCase ( UpperCamelCase ) -> int:
if _re_test_backend.search(UpperCamelCase ) is None:
return None
lowerCAmelCase__ : int = [b[0] for b in _re_backend.findall(UpperCamelCase )]
backends.sort()
return "_and_".join(UpperCamelCase )
def __lowerCAmelCase ( UpperCamelCase ) -> Any:
with open(UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCAmelCase__ : Union[str, Any] = f.readlines()
lowerCAmelCase__ : Tuple = 0
while line_index < len(UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(UpperCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase__ : List[str] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase__ : str = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(UpperCamelCase ):
lowerCAmelCase__ : str = _re_one_line_import_struct.search(UpperCamelCase ).groups()[0]
lowerCAmelCase__ : Optional[Any] = re.findall(R'''\[([^\]]+)\]''' , UpperCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
lowerCAmelCase__ : Tuple = _re_import_struct_key_value.search(UpperCamelCase )
if single_line_import_search is not None:
lowerCAmelCase__ : Optional[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(UpperCamelCase ) > 0]
objects.extend(UpperCamelCase )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase__ : Any = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase__ : List[str] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ : Union[str, Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ : Optional[int] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
lowerCAmelCase__ : str = lines[line_index]
if _re_import_struct_add_one.search(UpperCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(UpperCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(UpperCamelCase ) is not None:
lowerCAmelCase__ : Optional[int] = _re_import_struct_add_many.search(UpperCamelCase ).groups()[0].split(''', ''' )
lowerCAmelCase__ : List[Any] = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0]
objects.extend(UpperCamelCase )
elif _re_between_brackets.search(UpperCamelCase ) is not None:
lowerCAmelCase__ : List[Any] = _re_between_brackets.search(UpperCamelCase ).groups()[0].split(''', ''' )
lowerCAmelCase__ : Optional[int] = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0]
objects.extend(UpperCamelCase )
elif _re_quote_object.search(UpperCamelCase ) is not None:
objects.append(_re_quote_object.search(UpperCamelCase ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
lowerCAmelCase__ : Optional[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase__ : Any = []
while (
line_index < len(UpperCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
lowerCAmelCase__ : Tuple = lines[line_index]
lowerCAmelCase__ : List[Any] = _re_import.search(UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase__ : Dict = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(UpperCamelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase__ : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ : Tuple = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
lowerCAmelCase__ : Any = lines[line_index]
lowerCAmelCase__ : Union[str, Any] = _re_import.search(UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCAmelCase__ : str = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
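# Compare the two halves of an init: report duplicate definitions and objects present on only one side.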
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> List[Any]:
def find_duplicates(UpperCamelCase ):
return [k for k, v in collections.Counter(UpperCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowerCAmelCase__ : Optional[Any] = []
for key in import_dict_objects.keys():
lowerCAmelCase__ : str = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
lowerCAmelCase__ : int = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowerCAmelCase__ : List[Any] = '''base imports''' if key == '''none''' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
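# Walk the source tree and validate every __init__.py found along the way.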
def __lowerCAmelCase ( ) -> Optional[Any]:
lowerCAmelCase__ : Dict = []
for root, _, files in os.walk(UpperCamelCase ):
if "__init__.py" in files:
lowerCAmelCase__ : Any = os.path.join(UpperCamelCase , '''__init__.py''' )
lowerCAmelCase__ : List[Any] = parse_init(UpperCamelCase )
if objects is not None:
lowerCAmelCase__ : Optional[int] = analyze_results(*UpperCamelCase )
if len(UpperCamelCase ) > 0:
lowerCAmelCase__ : Tuple = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(UpperCamelCase ) )
if len(UpperCamelCase ) > 0:
raise ValueError('''\n\n'''.join(UpperCamelCase ) )
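# List the submodules of transformers, skipping private modules and empty leftover folders.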
def __lowerCAmelCase ( ) -> Tuple:
lowerCAmelCase__ : str = []
for path, directories, files in os.walk(UpperCamelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(UpperCamelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(UpperCamelCase ) / folder).glob('''*.py''' ) ) ) == 0:
continue
lowerCAmelCase__ : Tuple = str((Path(UpperCamelCase ) / folder).relative_to(UpperCamelCase ) )
lowerCAmelCase__ : Dict = short_path.replace(os.path.sep , '''.''' )
submodules.append(UpperCamelCase )
for fname in files:
if fname == "__init__.py":
continue
lowerCAmelCase__ : Union[str, Any] = str((Path(UpperCamelCase ) / fname).relative_to(UpperCamelCase ) )
lowerCAmelCase__ : Tuple = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(UpperCamelCase )
return submodules
lowerCAmelCase_ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def __lowerCAmelCase ( ) -> List[str]:
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
lowerCAmelCase__ : Dict = direct_transformers_import(UpperCamelCase )
lowerCAmelCase__ : int = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
# (potentially re-)add them.
with open(os.path.join(UpperCamelCase , '''__init__.py''' ) , '''r''' ) as f:
lowerCAmelCase__ : str = f.read()
import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , UpperCamelCase ) ) )
lowerCAmelCase__ : Optional[int] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(UpperCamelCase ) > 0:
lowerCAmelCase__ : List[Any] = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
F"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 678 | 1 |
import os
from datetime import datetime as dt
from github import Github
lowerCAmelCase_ = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
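# Scan open issues: close stale ones, un-stale recently active ones, and post a stale warning after 23 days of inactivity.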
def __lowerCAmelCase ( ) -> Optional[int]:
lowerCAmelCase__ : str = Github(os.environ['''GITHUB_TOKEN'''] )
lowerCAmelCase__ : List[Any] = g.get_repo('''huggingface/diffusers''' )
lowerCAmelCase__ : Optional[Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
lowerCAmelCase__ : Tuple = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = comments[0] if len(UpperCamelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
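# Example invocation (script name illustrative; the real CI wiring may differ):
#   GITHUB_TOKEN=<token> python stale.py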
| 678 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
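# Builds a small MegatronBert config plus dummy inputs and labels shared by the model tests below.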
class _lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=64 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ):
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : Tuple = batch_size
lowerCAmelCase__ : Union[str, Any] = seq_length
lowerCAmelCase__ : str = is_training
lowerCAmelCase__ : Union[str, Any] = use_input_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : int = use_labels
lowerCAmelCase__ : List[Any] = vocab_size
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : List[str] = embedding_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : Optional[int] = num_attention_heads
lowerCAmelCase__ : List[str] = intermediate_size
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = max_position_embeddings
lowerCAmelCase__ : List[Any] = type_vocab_size
lowerCAmelCase__ : Optional[Any] = type_sequence_label_size
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : Optional[Any] = num_labels
lowerCAmelCase__ : List[str] = num_choices
lowerCAmelCase__ : Any = scope
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : str = None
if self.use_input_mask:
lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : Optional[Any] = None
if self.use_token_type_ids:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Optional[int] = None
if self.use_labels:
lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__( self ):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = MegatronBertModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = MegatronBertForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = MegatronBertForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = MegatronBertForNextSentencePrediction(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = MegatronBertForPreTraining(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : int = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , next_sentence_label=__UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = MegatronBertForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.num_labels
lowerCAmelCase__ : Union[str, Any] = MegatronBertForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.num_labels
lowerCAmelCase__ : str = MegatronBertForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.num_choices
lowerCAmelCase__ : Dict = MegatronBertForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : Any = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs  # config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
lowerCAmelCase__ : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _lowercase , _lowercase , unittest.TestCase ):
A__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
A__ = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ = True
# test_resize_embeddings = False
A__ = False
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
lowerCAmelCase__ : List[Any] = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class in get_values(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def __magic_name__( self ):
lowerCAmelCase__ : str = MegatronBertModelTester(self )
lowerCAmelCase__ : Dict = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def __magic_name__( self ):
self.config_tester.run_common_tests()
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__UpperCAmelCase )
def __lowerCAmelCase ( UpperCamelCase ) -> Optional[int]:
return torch.tensor(
UpperCamelCase , dtype=torch.long , device=UpperCamelCase , )
lowerCAmelCase_ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
def __magic_name__( self ):
lowerCAmelCase__ : int = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
lowerCAmelCase__ : Union[str, Any] = os.path.join(os.environ['''MYDIR'''] , __UpperCAmelCase )
lowerCAmelCase__ : Tuple = MegatronBertModel.from_pretrained(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.half()
lowerCAmelCase__ : Optional[int] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase )[0]
lowerCAmelCase__ : List[Any] = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , __UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
lowerCAmelCase__ : Union[str, Any] = output[0, ii, jj]
lowerCAmelCase__ : Optional[Any] = expected[3 * ii + jj]
lowerCAmelCase__ : List[str] = '''ii={} jj={} a={} b={}'''.format(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertTrue(math.isclose(__UpperCAmelCase , __UpperCAmelCase , rel_tol=__UpperCAmelCase , abs_tol=__UpperCAmelCase ) , msg=__UpperCAmelCase )
| 678 | 1 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
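# Helpers to locate the latest completed scheduled (daily) CI run and download its artifacts.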
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase=7 ) -> Any:
lowerCAmelCase__ : Any = None
if token is not None:
lowerCAmelCase__ : str = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
lowerCAmelCase__ : Optional[int] = '''636036'''
lowerCAmelCase__ : str = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
lowerCAmelCase__ : List[Any] = requests.get(UpperCamelCase , headers=UpperCamelCase ).json()
return result["workflow_runs"]
def __lowerCAmelCase ( UpperCamelCase ) -> int:
lowerCAmelCase__ : str = get_daily_ci_runs(UpperCamelCase )
lowerCAmelCase__ : List[str] = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowerCAmelCase__ : Optional[Any] = workflow_run['''id''']
break
return workflow_run_id
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCAmelCase__ : Optional[int] = get_last_daily_ci_runs(UpperCamelCase )
if workflow_run_id is not None:
lowerCAmelCase__ : int = get_artifacts_links(worflow_run_id=UpperCamelCase , token=UpperCamelCase )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowerCAmelCase__ : List[str] = artifacts_links[artifact_name]
download_artifact(
artifact_name=UpperCamelCase , artifact_url=UpperCamelCase , output_dir=UpperCamelCase , token=UpperCamelCase )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
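# Download the requested artifacts, then unzip each one and return its decoded text files grouped per artifact.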
get_last_daily_ci_artifacts(UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : List[str] = {}
for artifact_name in artifact_names:
lowerCAmelCase__ : Tuple = os.path.join(UpperCamelCase , F"""{artifact_name}.zip""" )
if os.path.isfile(UpperCamelCase ):
lowerCAmelCase__ : str = {}
with zipfile.ZipFile(UpperCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(UpperCamelCase ):
# read the file
with z.open(UpperCamelCase ) as f:
lowerCAmelCase__ : List[Any] = f.read().decode('''UTF-8''' )
return results
| 678 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
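# BART configuration, followed by its ONNX export config covering the default, causal-lm and seq2seq-lm tasks.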
class _lowerCAmelCase ( _lowercase ):
A__ = 'bart'
A__ = ['past_key_values']
A__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , __UpperCAmelCase=5_0265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
lowerCAmelCase__ : Union[str, Any] = vocab_size
lowerCAmelCase__ : Optional[Any] = max_position_embeddings
lowerCAmelCase__ : int = d_model
lowerCAmelCase__ : str = encoder_ffn_dim
lowerCAmelCase__ : Any = encoder_layers
lowerCAmelCase__ : Dict = encoder_attention_heads
lowerCAmelCase__ : Union[str, Any] = decoder_ffn_dim
lowerCAmelCase__ : Union[str, Any] = decoder_layers
lowerCAmelCase__ : Any = decoder_attention_heads
lowerCAmelCase__ : Tuple = dropout
lowerCAmelCase__ : Any = attention_dropout
lowerCAmelCase__ : Any = activation_dropout
lowerCAmelCase__ : Optional[Any] = activation_function
lowerCAmelCase__ : Union[str, Any] = init_std
lowerCAmelCase__ : Union[str, Any] = encoder_layerdrop
lowerCAmelCase__ : int = decoder_layerdrop
lowerCAmelCase__ : Optional[int] = classifier_dropout
lowerCAmelCase__ : str = use_cache
lowerCAmelCase__ : int = encoder_layers
lowerCAmelCase__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ):
lowerCAmelCase__ : str = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
'''The config can simply be saved and uploaded again to be fixed.''' )
class _lowerCAmelCase ( _lowercase ):
@property
def __magic_name__( self ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCAmelCase__ : Any = {0: '''batch'''}
lowerCAmelCase__ : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowerCAmelCase__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''}
lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCAmelCase__ : List[str] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_layers
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCAmelCase__ : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
lowerCAmelCase__ : List[str] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __magic_name__( self ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Dict = super().outputs
else:
lowerCAmelCase__ : Any = super(__UpperCAmelCase , self ).outputs
if self.use_past:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.num_layers
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
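# Build encoder inputs, then decoder inputs (sequence length 1 when past is used), then optional past_key_values.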
lowerCAmelCase__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Generate decoder inputs
lowerCAmelCase__ : int = seq_length if not self.use_past else 1
lowerCAmelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
lowerCAmelCase__ : str = dict(**__UpperCAmelCase , **__UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = common_inputs['''input_ids'''].shape
lowerCAmelCase__ : List[str] = common_inputs['''decoder_input_ids'''].shape[1]
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.num_attention_heads
lowerCAmelCase__ : Any = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase__ : List[Any] = decoder_seq_length + 3
lowerCAmelCase__ : Any = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCAmelCase__ : Any = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 )
lowerCAmelCase__ : List[str] = []
# If the numbers of encoder and decoder layers are both present in the model configuration, both are considered
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.num_layers
lowerCAmelCase__ : Union[str, Any] = min(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Any = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers
lowerCAmelCase__ : Dict = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__UpperCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
) )
# TODO: test this.
lowerCAmelCase__ : Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__UpperCAmelCase , __UpperCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) )
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
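# Causal-LM variant: extend the attention mask so it also covers the zero-filled dummy past key/values.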
lowerCAmelCase__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCAmelCase__ , lowerCAmelCase__ : Any = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowerCAmelCase__ : List[str] = seqlen + 2
lowerCAmelCase__ , lowerCAmelCase__ : int = self.num_layers
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_attention_heads
lowerCAmelCase__ : Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase__ : Optional[Any] = common_inputs['''attention_mask'''].dtype
lowerCAmelCase__ : List[Any] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
lowerCAmelCase__ : List[Any] = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase )
]
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase__ : Tuple = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase__ : Optional[Any] = tokenizer.num_special_tokens_to_add(__UpperCAmelCase )
lowerCAmelCase__ : int = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase )
# Generate dummy inputs according to the computed batch and sequence lengths
lowerCAmelCase__ : Optional[int] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCAmelCase__ : str = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) )
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
elif self.task == "causal-lm":
lowerCAmelCase__ : List[str] = self._generate_dummy_inputs_for_causal_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
else:
lowerCAmelCase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Any = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
lowerCAmelCase__ : Dict = super(__UpperCAmelCase , self )._flatten_past_key_values_(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
| 678 | 1 |
import argparse
lowerCAmelCase_ = """docs/source/_static/js/custom.js"""
def __lowerCAmelCase ( UpperCamelCase ) -> List[str]:
with open(UpperCamelCase , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCAmelCase__ : Dict = f.readlines()
lowerCAmelCase__ : Optional[Any] = 0
# First let's put the right version
while not lines[index].startswith('''const stableVersion =''' ):
index += 1
lowerCAmelCase__ : Tuple = F"""const stableVersion = \"v{version}\"\n"""
# Then update the dictionary
while not lines[index].startswith('''const versionMapping = {''' ):
index += 1
# We go until the end
while not lines[index].startswith('''}''' ):
index += 1
# We add the new version at the end
lines[index - 1] += F""" \"v{version}\": \"v{version}\",\n"""
with open(UpperCamelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(UpperCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
lowerCAmelCase_ = parser.parse_args()
update_custom_js(args.version)
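# Example invocation (script name illustrative):
#   python update_custom_js.py --version 4.31.0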
| 678 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'sew-d'
def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase=2 , __UpperCAmelCase=512 , __UpperCAmelCase=256 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=("p2c", "c2p") , __UpperCAmelCase="layer_norm" , __UpperCAmelCase="gelu_python" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __UpperCAmelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : Optional[int] = feat_extract_norm
lowerCAmelCase__ : str = feat_extract_activation
lowerCAmelCase__ : int = list(__UpperCAmelCase )
lowerCAmelCase__ : int = list(__UpperCAmelCase )
lowerCAmelCase__ : Any = list(__UpperCAmelCase )
lowerCAmelCase__ : int = conv_bias
lowerCAmelCase__ : List[Any] = num_conv_pos_embeddings
lowerCAmelCase__ : Optional[int] = num_conv_pos_embedding_groups
lowerCAmelCase__ : int = len(self.conv_dim )
lowerCAmelCase__ : Union[str, Any] = num_hidden_layers
lowerCAmelCase__ : Any = intermediate_size
lowerCAmelCase__ : int = squeeze_factor
lowerCAmelCase__ : int = max_position_embeddings
lowerCAmelCase__ : Any = position_buckets
lowerCAmelCase__ : Optional[int] = share_att_key
lowerCAmelCase__ : Tuple = relative_attention
lowerCAmelCase__ : Optional[int] = norm_rel_ebd
lowerCAmelCase__ : Tuple = list(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = hidden_act
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : Optional[int] = hidden_dropout
lowerCAmelCase__ : Union[str, Any] = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : List[Any] = feat_proj_dropout
lowerCAmelCase__ : Any = final_dropout
lowerCAmelCase__ : Optional[int] = layer_norm_eps
lowerCAmelCase__ : List[str] = feature_layer_norm_eps
lowerCAmelCase__ : Tuple = initializer_range
lowerCAmelCase__ : Tuple = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Tuple = apply_spec_augment
lowerCAmelCase__ : List[str] = mask_time_prob
lowerCAmelCase__ : int = mask_time_length
lowerCAmelCase__ : int = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : int = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
# ctc loss
lowerCAmelCase__ : Optional[Any] = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# sequence classification
lowerCAmelCase__ : Tuple = use_weighted_layer_sum
lowerCAmelCase__ : Dict = classifier_proj_size
@property
def __magic_name__( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
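# The property above multiplies the convolutional strides together, giving
# the overall downsampling factor between raw audio samples and encoder
# outputs. With the default strides from the signature above:
#
#   functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1)
#   # -> 320 (5 * 2**6): one output frame per 320 input samples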
| 678 | 1 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
lowerCAmelCase_ = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
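# A hypothetical invocation of the converter above; the base model name is a
# real hub id, but the script filename and the paths are placeholders:
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model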
| 678 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( _lowercase , unittest.TestCase ):
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def __magic_name__( self ):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __magic_name__( self ):
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def __magic_name__( self , **__UpperCAmelCase ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase ):
return ("This is a test", "This is a test")
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = '''</s>'''
lowerCAmelCase__ : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(__UpperCAmelCase ) , 1103 )
def __magic_name__( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : int = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowerCAmelCase__ : Any = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
lowerCAmelCase__ : Dict = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCAmelCase__ : List[str] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowerCAmelCase__ : Tuple = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase__ : Tuple = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase__ : str = '''To ensure a smooth flow of bank resolutions.'''
lowerCAmelCase__ : int = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase__ : List[Any] = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = ['''This is going to be way too long.''' * 150, '''short example''']
lowerCAmelCase__ : List[str] = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
lowerCAmelCase__ : Optional[int] = self._large_tokenizer(
text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def __magic_name__( self ):
# fmt: off
lowerCAmelCase__ : Optional[int] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( _lowercase , unittest.TestCase ):
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def __magic_name__( self ):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __magic_name__( self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def __magic_name__( self , **__UpperCAmelCase ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase ):
return ("This is a test", "This is a test")
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : str = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowerCAmelCase__ : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
lowerCAmelCase__ : int = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = ['''This is going to be way too long.''' * 1000, '''short example''']
lowerCAmelCase__ : int = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
lowerCAmelCase__ : Tuple = self._large_tokenizer(
text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask.
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowerCAmelCase__ : Union[str, Any] = self._large_tokenizer(__UpperCAmelCase ).input_ids
self.assertListEqual(
__UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
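# The assertions above pin down the Pegasus id layout: ids 0 and 1 are <pad>
# and </s>, the next `offset` slots are reserved for special tokens such as
# <mask_1> and <mask_2>, and the underlying SentencePiece ids are shifted up
# by `offset` (103 for google/pegasus-large), e.g.:
#
#   pegasus_id = sp_id + offset  # sketch of the shift, assuming this layout
#   # sp_id = 2, offset = 103 -> 105, matching the unk_token_id assertion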
| 678 | 1 |
import math
import sys
def read_file_binary(file_path: str) -> str:
    """
    Reads given file as bytes and returns them as a long string
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """
    Decompresses given data_bits using the Lempel-Ziv-Welch algorithm
    and returns the result as a string
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """
    Writes given to_write string (should only consist of 0's and 1's) as bytes in the file
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """
    Removes the leading zero-run size prefix and its "1" delimiter bit
    """
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """
    Reads the source file, decompresses it and writes the result to the destination file
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
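# A small aside on the f"{dat:08b}" formatting used in read_file_binary: each
# byte becomes exactly eight characters, so the concatenated result is a
# lossless bit-string view of the file, e.g.:
#
#   f"{65:08b}"  # -> "01000001" ('A')
#   f"{3:08b}"   # -> "00000011" (zero-padded to a full byte)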
| 678 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'donut-swin'
A__ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[3, 6, 12, 24] , __UpperCAmelCase=7 , __UpperCAmelCase=4.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = image_size
lowerCAmelCase__ : List[str] = patch_size
lowerCAmelCase__ : int = num_channels
lowerCAmelCase__ : Optional[Any] = embed_dim
lowerCAmelCase__ : int = depths
lowerCAmelCase__ : Dict = len(depths )
lowerCAmelCase__ : Union[str, Any] = num_heads
lowerCAmelCase__ : Dict = window_size
lowerCAmelCase__ : str = mlp_ratio
lowerCAmelCase__ : Optional[int] = qkv_bias
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase__ : List[str] = drop_path_rate
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : List[str] = use_absolute_embeddings
lowerCAmelCase__ : Dict = layer_norm_eps
lowerCAmelCase__ : Any = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase__ : List[Any] = int(embed_dim * 2 ** (len(depths ) - 1) )
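# Worked example of the hidden_size formula above with the signature defaults
# (embed_dim=96, depths=[2, 2, 6, 2]): the channel count doubles at each of
# the len(depths) - 1 = 3 patch-merging stages.
#
#   int(96 * 2 ** (4 - 1))  # -> 768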
| 678 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""Salesforce/blip-vqa-base""": """https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json""",
"""Salesforce/blip-vqa-capfit-large""": (
"""https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-base""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-large""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"""
),
"""Salesforce/blip-itm-base-coco""": """https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-large-coco""": """https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-base-flikr""": """https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json""",
"""Salesforce/blip-itm-large-flikr""": (
"""https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'blip_text_model'
def __init__( self , __UpperCAmelCase=3_0524 , __UpperCAmelCase=768 , __UpperCAmelCase=768 , __UpperCAmelCase=3072 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=8 , __UpperCAmelCase=512 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1e-12 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3_0522 , __UpperCAmelCase=2 , __UpperCAmelCase=0 , __UpperCAmelCase=102 , __UpperCAmelCase=True , __UpperCAmelCase=True , **__UpperCAmelCase , ):
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , sep_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ : List[str] = vocab_size
lowerCAmelCase__ : Optional[Any] = hidden_size
lowerCAmelCase__ : List[str] = encoder_hidden_size
lowerCAmelCase__ : List[Any] = intermediate_size
lowerCAmelCase__ : Optional[Any] = projection_dim
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : str = num_attention_heads
lowerCAmelCase__ : List[Any] = max_position_embeddings
lowerCAmelCase__ : Any = layer_norm_eps
lowerCAmelCase__ : int = hidden_act
lowerCAmelCase__ : List[str] = initializer_range
lowerCAmelCase__ : Dict = attention_probs_dropout_prob
lowerCAmelCase__ : int = is_decoder
lowerCAmelCase__ : Any = use_cache
@classmethod
def __magic_name__( cls , __UpperCAmelCase , **__UpperCAmelCase ):
cls._set_token_in_kwargs(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
lowerCAmelCase__ : str = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
class _lowerCAmelCase ( _lowercase ):
A__ = 'blip_vision_model'
def __init__( self , __UpperCAmelCase=768 , __UpperCAmelCase=3072 , __UpperCAmelCase=512 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=384 , __UpperCAmelCase=16 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1e-10 , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Any = hidden_size
lowerCAmelCase__ : List[str] = intermediate_size
lowerCAmelCase__ : List[Any] = projection_dim
lowerCAmelCase__ : Optional[Any] = num_hidden_layers
lowerCAmelCase__ : Optional[Any] = num_attention_heads
lowerCAmelCase__ : str = patch_size
lowerCAmelCase__ : List[Any] = image_size
lowerCAmelCase__ : Any = initializer_range
lowerCAmelCase__ : int = attention_dropout
lowerCAmelCase__ : List[Any] = layer_norm_eps
lowerCAmelCase__ : Dict = hidden_act
@classmethod
def __magic_name__( cls , __UpperCAmelCase , **__UpperCAmelCase ):
cls._set_token_in_kwargs(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
lowerCAmelCase__ : Tuple = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
class _lowerCAmelCase ( _lowercase ):
A__ = 'blip'
A__ = True
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=512 , __UpperCAmelCase=2.6592 , __UpperCAmelCase=256 , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
if text_config is None:
lowerCAmelCase__ : str = {}
logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' )
if vision_config is None:
lowerCAmelCase__ : Optional[int] = {}
logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' )
lowerCAmelCase__ : Optional[Any] = BlipTextConfig(**__UpperCAmelCase )
lowerCAmelCase__ : str = BlipVisionConfig(**__UpperCAmelCase )
lowerCAmelCase__ : int = self.vision_config.hidden_size
lowerCAmelCase__ : List[Any] = projection_dim
lowerCAmelCase__ : Optional[Any] = logit_scale_init_value
lowerCAmelCase__ : int = 1.0
lowerCAmelCase__ : Union[str, Any] = 0.02
lowerCAmelCase__ : Tuple = image_text_hidden_size
@classmethod
def __magic_name__( cls , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Any = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ : Dict = self.text_config.to_dict()
lowerCAmelCase__ : Optional[Any] = self.vision_config.to_dict()
lowerCAmelCase__ : str = self.__class__.model_type
return output
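# A minimal sketch of composing the configs defined above (the upstream
# transformers name for the composing classmethod is from_text_vision_configs;
# this file defines the same method under an obfuscated name), using defaults:
#
#   text_config = BlipTextConfig()
#   vision_config = BlipVisionConfig()
#   config = BlipConfig.from_text_vision_configs(text_config, vision_config)
#   assert config.projection_dim == 512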
| 678 |
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
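# Example calls for length_conversion above; kilometer -> meter is an
# exponent difference of 3, i.e. a factor of 10**3:
#
#   length_conversion(4, "kilometer", "meter")   # -> 4000.0
#   length_conversion(1, "meter", "kilometer")   # -> 0.001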
| 678 | 1 |
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="""avgpooling""", verbose=True)
# Loading the image
image = Image.open("""path_to_image""")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
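# A quick numeric check for the pooling functions above: with size=2 and
# stride=2 a 4x4 input yields a 2x2 output.
#
#   sample = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
#   maxpooling(sample, size=2, stride=2)  # -> [[ 6.,  8.], [14., 16.]]
#   avgpooling(sample, size=2, stride=2)  # -> [[ 3.,  5.], [11., 13.]]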
| 678 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def __magic_name__( *__UpperCAmelCase , **__UpperCAmelCase ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : int = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : List[str] = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__UpperCAmelCase ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
] , )
@require_tf
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
] , )
@slow
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
lowerCAmelCase__ : Tuple = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : Union[str, Any] = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
lowerCAmelCase__ : Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
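# Distilled from the assertions above: a minimal, non-test sketch of the same
# pipeline call (model name and labels are the ones used in the slow tests;
# the image path is a placeholder):
#
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   scores = classifier("cats.png", candidate_labels=["cat", "plane", "remote"])
#   # -> list of {"score": ..., "label": ...} dicts sorted by score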
| 678 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
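# Design note on the block above: at import time the module swaps itself for
# a _LazyModule in sys.modules, so heavy submodules (here, the torch-backed
# modeling file) are only imported on first attribute access, while the
# TYPE_CHECKING branch keeps static type checkers aware of the real symbols.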
| 678 |
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    # Return True if the sink `t` is reachable from the source `s` in the
    # residual graph, filling `parent` with the BFS tree along the way.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    # Augment flow while there is a path from source to sink, then collect
    # the saturated edges that form the minimum cut.
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
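# Note that mincut() augments flow along BFS paths (Edmonds-Karp) and mutates
# the capacity matrix in place; pass a copy when the original graph is still
# needed afterwards, e.g.:
#
#   import copy
#   cut_edges = mincut(copy.deepcopy(test_graph), source=0, sink=5)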
| 678 | 1 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
class _lowerCAmelCase ( _lowercase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 350 , ):
super().__init__(
vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , max_noise_level=__UpperCAmelCase , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
lowerCAmelCase__ : Union[str, Any] = add_overlap_rect(__UpperCAmelCase , __UpperCAmelCase , image.size )
lowerCAmelCase__ : int = image.crop(__UpperCAmelCase )
lowerCAmelCase__ : Tuple = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
lowerCAmelCase__ : Dict = translated_slice_x - (original_image_slice / 2)
lowerCAmelCase__ : List[Any] = max(0 , __UpperCAmelCase )
lowerCAmelCase__ : int = squeeze_tile(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[str] = to_input.size
lowerCAmelCase__ : int = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
lowerCAmelCase__ : str = super(__UpperCAmelCase , self ).__call__(image=__UpperCAmelCase , **__UpperCAmelCase ).images[0]
lowerCAmelCase__ : List[Any] = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
lowerCAmelCase__ : Tuple = unsqueeze_tile(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
lowerCAmelCase__ : List[str] = []
if x == 0:
remove_borders.append('''l''' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('''r''' )
if y == 0:
remove_borders.append('''t''' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('''b''' )
lowerCAmelCase__ : Optional[int] = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__UpperCAmelCase ) , mode='''L''' , )
final_image.paste(
__UpperCAmelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __UpperCAmelCase )
@torch.no_grad()
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 75 , __UpperCAmelCase = 9.0 , __UpperCAmelCase = 50 , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 128 , __UpperCAmelCase = 32 , __UpperCAmelCase = 32 , ):
lowerCAmelCase__ : Any = Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) )
lowerCAmelCase__ : Optional[int] = math.ceil(image.size[0] / tile_size )
lowerCAmelCase__ : Optional[int] = math.ceil(image.size[1] / tile_size )
lowerCAmelCase__ : Dict = tcx * tcy
lowerCAmelCase__ : Any = 0
for y in range(__UpperCAmelCase ):
for x in range(__UpperCAmelCase ):
self._process_tile(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , prompt=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , noise_level=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , )
current_count += 1
if callback is not None:
callback({'''progress''': current_count / total_tile_count, '''image''': final_image} )
return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
| 678 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _lowerCAmelCase ( unittest.TestCase ):
A__ = MODEL_FOR_CAUSAL_LM_MAPPING
A__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
__UpperCAmelCase , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
] , )
lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id
lowerCAmelCase__ : List[Any] = '''<pad>'''
lowerCAmelCase__ : List[Any] = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , )
self.assertEqual(
__UpperCAmelCase , [
[
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
],
[
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
],
] , )
@require_tf
def __magic_name__( self ):
lowerCAmelCase__ : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Dict = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
return text_generator, ["This is a test", "Another test"]
def __magic_name__( self ):
lowerCAmelCase__ : Any = '''Hello I believe in'''
lowerCAmelCase__ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ : Optional[int] = text_generator(__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , stop_sequence=''' fe''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = text_generator.model
lowerCAmelCase__ : Optional[int] = text_generator.tokenizer
lowerCAmelCase__ : Tuple = text_generator('''This is a test''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase )
lowerCAmelCase__ : Dict = text_generator('''This is a test''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCAmelCase__ : List[str] = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
] , )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Any = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : str = text_generator('''test''' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCAmelCase__ : str = text_generator('''''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCAmelCase__ : List[str] = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCAmelCase__ : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__UpperCAmelCase ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __magic_name__( self ):
import torch
# Classic `model_kwargs`
lowerCAmelCase__ : List[str] = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ : Any = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ : Union[str, Any] = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCAmelCase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowerCAmelCase__ : Any = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def __magic_name__( self ):
import torch
lowerCAmelCase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def __magic_name__( self ):
import torch
lowerCAmelCase__ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=__UpperCAmelCase , top_p=0.5 )
def __magic_name__( self ):
lowerCAmelCase__ : int = '''Hello world'''
lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
lowerCAmelCase__ : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' )
else:
lowerCAmelCase__ : Dict = logging.get_logger('''transformers.generation.utils''' )
        lowerCAmelCase__ : Optional[Any] = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(__UpperCAmelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : Any = text_generator(__UpperCAmelCase , max_new_tokens=1 )
self.assertNotIn(__UpperCAmelCase , cl.out )
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : Union[str, Any] = text_generator(__UpperCAmelCase , max_length=10 )
self.assertNotIn(__UpperCAmelCase , cl.out )
| 678 | 1 |
from random import shuffle
import tensorflow as tf
from numpy import array
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> str:
lowerCAmelCase__ : Optional[Any] = int(UpperCamelCase )
assert noofclusters < len(UpperCamelCase )
# Find out the dimensionality
lowerCAmelCase__ : str = len(vectors[0] )
# Will help select random centroids from among the available vectors
lowerCAmelCase__ : Any = list(range(len(UpperCamelCase ) ) )
shuffle(UpperCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
lowerCAmelCase__ : int = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
lowerCAmelCase__ : Optional[int] = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First let's ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
lowerCAmelCase__ : Tuple = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(UpperCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
lowerCAmelCase__ : Dict = tf.placeholder('''float64''' , [dim] )
lowerCAmelCase__ : int = []
for centroid in centroids:
cent_assigns.append(tf.assign(UpperCamelCase , UpperCamelCase ) )
        ##Variables for cluster assignments of individual vectors (initialized
##to 0 at first)
lowerCAmelCase__ : Any = [tf.Variable(0 ) for i in range(len(UpperCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
lowerCAmelCase__ : Union[str, Any] = tf.placeholder('''int32''' )
lowerCAmelCase__ : List[str] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(UpperCamelCase , UpperCamelCase ) )
        ##Now let's construct the node that will compute the mean
# The placeholder for the input
lowerCAmelCase__ : str = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
lowerCAmelCase__ : Any = tf.reduce_mean(UpperCamelCase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
lowerCAmelCase__ : Optional[Any] = tf.placeholder('''float''' , [dim] )
lowerCAmelCase__ : Optional[int] = tf.placeholder('''float''' , [dim] )
        lowerCAmelCase__ : Tuple = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(UpperCamelCase , UpperCamelCase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
lowerCAmelCase__ : Any = tf.placeholder('''float''' , [noofclusters] )
lowerCAmelCase__ : List[Any] = tf.argmin(UpperCamelCase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
lowerCAmelCase__ : Union[str, Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(UpperCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
lowerCAmelCase__ : List[str] = 100
for _ in range(UpperCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(UpperCamelCase ) ):
lowerCAmelCase__ : str = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
                # 'centroid_distances', since that is the input to the
# cluster assignment node.
lowerCAmelCase__ : Dict = [
sess.run(UpperCamelCase , feed_dict={va: vect, va: sess.run(UpperCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
lowerCAmelCase__ : int = sess.run(
UpperCamelCase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(UpperCamelCase ):
# Collect all the vectors assigned to this cluster
lowerCAmelCase__ : Tuple = [
vectors[i]
for i in range(len(UpperCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
lowerCAmelCase__ : str = sess.run(
UpperCamelCase , feed_dict={mean_input: array(UpperCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
lowerCAmelCase__ : Union[str, Any] = sess.run(UpperCamelCase )
lowerCAmelCase__ : str = sess.run(UpperCamelCase )
return centroids, assignments
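# --- Added illustration: a self-contained sketch (hypothetical names) of one
# Expectation/Maximization round of the same algorithm in plain NumPy,
# assuming `vectors` and `centroids` are ndarrays and every cluster keeps at
# least one assigned vector.
def _numpy_kmeans_step(vectors, centroids):
    # E-step: assign each vector to its nearest centroid (Euclidean distance).
    dists = ((vectors[:, None, :] - centroids[None, :, :]) ** 2).sum(axis=-1)
    assignments = dists.argmin(axis=1)
    # M-step: move each centroid to the mean of the vectors assigned to it.
    new_centroids = array(
        [vectors[assignments == k].mean(axis=0) for k in range(len(centroids))]
    )
    return new_centroids, assignments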
| 678 |
def __lowerCAmelCase ( UpperCamelCase ) -> str:
return "".join([hex(UpperCamelCase )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase )] )
def __lowerCAmelCase ( UpperCamelCase ) -> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(UpperCamelCase ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCamelCase ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase ) , 2 ) )
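# --- Added illustration: a minimal round-trip sketch with the standard
# library, showing the same hex encoding/decoding behaviour; the example
# bytes are hypothetical.
import base64

_example = b"Hello World!"
_encoded = _example.hex().upper()  # same transformation as the encoder above
assert _encoded == "48656C6C6F20576F726C6421"
assert base64.b16decode(_encoded) == _example  # mirrors the decoder above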
if __name__ == "__main__":
import doctest
doctest.testmod()
| 678 | 1 |
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> list:
lowerCAmelCase__ : Optional[Any] = word.split()
def justify(UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
lowerCAmelCase__ : Union[str, Any] = max_width - width
lowerCAmelCase__ : Union[str, Any] = len(UpperCamelCase )
if len(UpperCamelCase ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
lowerCAmelCase__ : Dict = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after the word at line[i]
lowerCAmelCase__ : Dict = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
lowerCAmelCase__ : List[Any] = (
overall_spaces_count % spaces_to_insert_between_words
)
            # distribute the remaining spaces round-robin to the leftmost gaps
for i in range(UpperCamelCase ):
num_spaces_between_words_list[i] += 1
lowerCAmelCase__ : List[str] = []
for i in range(UpperCamelCase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(UpperCamelCase )
lowerCAmelCase__ : List[Any] = []
lowerCAmelCase__ : list[str] = []
lowerCAmelCase__ : Union[str, Any] = 0
for word in words:
if width + len(UpperCamelCase ) + len(UpperCamelCase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(UpperCamelCase )
width += len(UpperCamelCase )
else:
# justify the line and add it to result
answer.append(justify(UpperCamelCase , UpperCamelCase , UpperCamelCase ) )
# reset new line and new width
lowerCAmelCase__ , lowerCAmelCase__ : Dict = [word], len(UpperCamelCase )
lowerCAmelCase__ : int = max_width - width - len(UpperCamelCase )
answer.append(''' '''.join(UpperCamelCase ) + (remaining_spaces + 1) * ''' ''' )
return answer
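# --- Added illustration: the round-robin distribution in miniature. Splitting
# 5 extra spaces across 3 gaps gives [2, 2, 1]; the leftmost gaps absorb the
# remainder first, exactly as in the loop above.
_base, _extra = divmod(5, 3)
assert [_base + (1 if i < _extra else 0) for i in range(3)] == [2, 2, 1]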
if __name__ == "__main__":
from doctest import testmod
testmod()
| 678 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _lowerCAmelCase ( _lowercase ):
A__ = (DPMSolverSDEScheduler,)
A__ = 10
def __magic_name__( self , **__UpperCAmelCase ):
lowerCAmelCase__ : Dict = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**__UpperCAmelCase )
return config
def __magic_name__( self ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase )
def __magic_name__( self ):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase )
def __magic_name__( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__UpperCAmelCase )
def __magic_name__( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = self.scheduler_classes[0]
lowerCAmelCase__ : str = self.get_scheduler_config()
lowerCAmelCase__ : Optional[Any] = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Union[str, Any] = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Optional[Any] = sample.to(__UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : int = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[str] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[str] = output.prev_sample
lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Optional[Any] = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Tuple = sample.to(__UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : int = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : int = output.prev_sample
lowerCAmelCase__ : Any = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase )
lowerCAmelCase__ : Dict = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = output.prev_sample
lowerCAmelCase__ : Dict = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(__UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
lowerCAmelCase__ : Dict = self.get_scheduler_config()
lowerCAmelCase__ : Optional[int] = scheduler_class(**__UpperCAmelCase , use_karras_sigmas=__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma
lowerCAmelCase__ : Union[str, Any] = sample.to(__UpperCAmelCase )
for t in scheduler.timesteps:
lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Dict = output.prev_sample
lowerCAmelCase__ : int = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
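# --- Added illustration (not part of the test suite): a minimal sketch of the
# denoising loop the tests above exercise, with a trivial stand-in "model".
# The tensor shape and the zero prediction are assumptions for demonstration;
# a real pipeline would use a UNet here.
def _denoising_loop_sketch():
    scheduler = DPMSolverSDEScheduler()
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for a real UNet
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample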
| 678 | 1 |
import numpy as np
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase = 1E-12 , UpperCamelCase = 100 , ) -> tuple[float, np.ndarray]:
assert np.shape(UpperCamelCase )[0] == np.shape(UpperCamelCase )[1]
# Ensure proper dimensionality.
assert np.shape(UpperCamelCase )[0] == np.shape(UpperCamelCase )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(UpperCamelCase ) == np.iscomplexobj(UpperCamelCase )
lowerCAmelCase__ : Dict = np.iscomplexobj(UpperCamelCase )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(UpperCamelCase , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to the next.
lowerCAmelCase__ : Tuple = False
lowerCAmelCase__ : Dict = 0
lowerCAmelCase__ : Any = 0
lowerCAmelCase__ : Union[str, Any] = 1E12
while not convergence:
        # Multiply the matrix by the vector.
lowerCAmelCase__ : str = np.dot(UpperCamelCase , UpperCamelCase )
# Normalize the resulting output vector.
lowerCAmelCase__ : Optional[Any] = w / np.linalg.norm(UpperCamelCase )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
lowerCAmelCase__ : List[str] = vector.conj().T if is_complex else vector.T
lowerCAmelCase__ : int = np.dot(UpperCamelCase , np.dot(UpperCamelCase , UpperCamelCase ) )
# Check convergence.
lowerCAmelCase__ : int = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
lowerCAmelCase__ : Tuple = True
lowerCAmelCase__ : Union[str, Any] = lambda_
if is_complex:
lowerCAmelCase__ : Dict = np.real(lambda_ )
return lambda_, vector
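# --- Added illustration: a worked micro-example of the iteration above. For
# the diagonal matrix diag(2, 1) it converges to the dominant eigenvalue 2
# with eigenvector (1, 0) up to sign.
_m = np.array([[2.0, 0.0], [0.0, 1.0]])
_v = np.array([1.0, 1.0])
for _ in range(50):
    _v = _m @ _v
    _v = _v / np.linalg.norm(_v)
assert abs(float(_v @ (_m @ _v)) - 2.0) < 1e-9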
def __lowerCAmelCase ( ) -> None:
lowerCAmelCase__ : int = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
lowerCAmelCase__ : Any = np.array([41, 4, 20] )
lowerCAmelCase__ : int = real_input_matrix.astype(np.complexaaa )
lowerCAmelCase__ : Tuple = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
lowerCAmelCase__ : int = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
lowerCAmelCase__ : List[str] = real_input_matrix
lowerCAmelCase__ : List[Any] = real_vector
elif problem_type == "complex":
lowerCAmelCase__ : Dict = complex_input_matrix
lowerCAmelCase__ : Optional[int] = complex_vector
# Our implementation.
lowerCAmelCase__ , lowerCAmelCase__ : Any = power_iteration(UpperCamelCase , UpperCamelCase )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or Hermitian matrices).
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = np.linalg.eigh(UpperCamelCase )
# Last eigenvalue is the maximum one.
lowerCAmelCase__ : str = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
lowerCAmelCase__ : Optional[int] = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
        # Take element-wise absolute values of each eigenvector,
        # as they are only unique up to a minus sign.
assert np.linalg.norm(np.abs(UpperCamelCase ) - np.abs(UpperCamelCase ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 678 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self , __UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = 3
lowerCAmelCase__ : Tuple = 250
lowerCAmelCase__ : List[Any] = ids_tensor((batch_size, length) , __UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = torch.ones((batch_size, length) , device=__UpperCAmelCase , dtype=torch.float ) / length
return input_ids, scores
def __magic_name__( self ):
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 )
lowerCAmelCase__ : List[str] = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(9 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(10 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = MaxLengthCriteria(max_length=10 )
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self._get_tensors(5 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(9 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(10 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(5 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(9 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(10 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ : int = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def __magic_name__( self ):
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 )
lowerCAmelCase__ : Any = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ : int = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(__UpperCAmelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
lowerCAmelCase__ : List[str] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(__UpperCAmelCase ) , 1 )
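# --- Added illustration (not part of the tests): in practice these criteria
# are passed to `generate` via the `stopping_criteria` argument. The tiny
# checkpoint name below is an assumption for demonstration.
def _generate_with_criteria_sketch():
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tokenizer("Hello", return_tensors="pt")
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=10)])
    return model.generate(**inputs, stopping_criteria=criteria)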
| 678 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'mvp'
A__ = ['past_key_values']
A__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , __UpperCAmelCase=5_0267 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=False , __UpperCAmelCase=100 , __UpperCAmelCase=800 , **__UpperCAmelCase , ):
lowerCAmelCase__ : Union[str, Any] = vocab_size
lowerCAmelCase__ : Any = max_position_embeddings
lowerCAmelCase__ : List[str] = d_model
lowerCAmelCase__ : Any = encoder_ffn_dim
lowerCAmelCase__ : str = encoder_layers
lowerCAmelCase__ : Union[str, Any] = encoder_attention_heads
lowerCAmelCase__ : Tuple = decoder_ffn_dim
lowerCAmelCase__ : Optional[int] = decoder_layers
lowerCAmelCase__ : List[Any] = decoder_attention_heads
lowerCAmelCase__ : Optional[int] = dropout
lowerCAmelCase__ : Any = attention_dropout
lowerCAmelCase__ : Optional[int] = activation_dropout
lowerCAmelCase__ : Union[str, Any] = activation_function
lowerCAmelCase__ : str = init_std
lowerCAmelCase__ : Dict = encoder_layerdrop
lowerCAmelCase__ : Optional[Any] = decoder_layerdrop
lowerCAmelCase__ : Optional[int] = classifier_dropout
lowerCAmelCase__ : Union[str, Any] = use_cache
lowerCAmelCase__ : str = encoder_layers
lowerCAmelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase__ : Dict = use_prompt
lowerCAmelCase__ : Union[str, Any] = prompt_length
lowerCAmelCase__ : Union[str, Any] = prompt_mid_dim
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ):
lowerCAmelCase__ : Tuple = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
'''The config can simply be saved and uploaded again to be fixed.''' )
| 678 |
from functools import reduce
lowerCAmelCase_ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def __lowerCAmelCase ( UpperCamelCase = N ) -> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda UpperCamelCase , UpperCamelCase : str(int(UpperCamelCase ) * int(UpperCamelCase ) ) , n[i : i + 13] ) )
for i in range(len(UpperCamelCase ) - 12 ) )
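# --- Added illustration: the same sliding-window product on a toy input. For
# n = "12345" and a window of 2 the candidate products are 2, 6, 12, 20, so
# the maximum is 20.
assert max(
    int(reduce(lambda x, y: str(int(x) * int(y)), "12345"[i : i + 2]))
    for i in range(len("12345") - 1)
) == 20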
if __name__ == "__main__":
print(F"""{solution() = }""")
| 678 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 678 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
def __lowerCAmelCase ( UpperCamelCase ) -> List[str]:
lowerCAmelCase__ : int = R'''\w+[.]\d+'''
lowerCAmelCase__ : Tuple = re.findall(UpperCamelCase , UpperCamelCase )
for pat in pats:
lowerCAmelCase__ : List[str] = key.replace(UpperCamelCase , '''_'''.join(pat.split('''.''' ) ) )
return key
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
lowerCAmelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowerCAmelCase__ : List[str] = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowerCAmelCase__ : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
lowerCAmelCase__ : str = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCAmelCase__ : Any = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
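# --- Added illustration: why the conv branch above transposes with
# (2, 3, 1, 0). PyTorch stores conv kernels as (out, in, H, W) while Flax
# expects (H, W, in, out); the shape below is an assumption for demonstration.
_pt_kernel = jnp.zeros((8, 3, 5, 5))  # (out_channels, in_channels, H, W)
assert _pt_kernel.transpose(2, 3, 1, 0).shape == (5, 5, 3, 8)  # (H, W, in, out)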
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=42 ) -> Any:
# Step 1: Convert pytorch tensor to numpy
lowerCAmelCase__ : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowerCAmelCase__ : Tuple = flax_model.init_weights(PRNGKey(UpperCamelCase ) )
lowerCAmelCase__ : Any = flatten_dict(UpperCamelCase )
lowerCAmelCase__ : List[Any] = {}
    # Need to change some parameter names to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase__ : str = rename_key(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = rename_key_and_reshape_tensor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
lowerCAmelCase__ : List[str] = jnp.asarray(UpperCamelCase )
return unflatten_dict(UpperCamelCase )
| 678 | 1 |
def __lowerCAmelCase ( UpperCamelCase ) -> list:
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(UpperCamelCase ) )
if txt[a].isalpha()
]
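# --- Added illustration: a worked micro-example of the comprehension above.
# Capitalizing each alphabetic position of "ab" in turn yields ["Ab", "aB"].
assert [
    "ab"[:i] + "ab"[i].upper() + "ab"[i + 1 :]
    for i in range(2)
    if "ab"[i].isalpha()
] == ["Ab", "aB"]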
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 678 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 678 | 1 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = """Hello world! cécé herlolip"""
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]:
lowerCAmelCase__ : Any = FairseqRobertaModel.from_pretrained(UpperCamelCase )
roberta.eval() # disable dropout
lowerCAmelCase__ : str = roberta.model.encoder.sentence_encoder
lowerCAmelCase__ : int = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
lowerCAmelCase__ : List[str] = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , UpperCamelCase )
lowerCAmelCase__ : int = XLMRobertaXLForSequenceClassification(UpperCamelCase ) if classification_head else XLMRobertaXLForMaskedLM(UpperCamelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCAmelCase__ : List[Any] = roberta_sent_encoder.embed_tokens.weight
lowerCAmelCase__ : str = roberta_sent_encoder.embed_positions.weight
lowerCAmelCase__ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
lowerCAmelCase__ : int = roberta_sent_encoder.layer_norm.weight
lowerCAmelCase__ : List[Any] = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCAmelCase__ : BertLayer = model.roberta.encoder.layer[i]
lowerCAmelCase__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
lowerCAmelCase__ : RobertaAttention = layer.attention
lowerCAmelCase__ : int = roberta_layer.self_attn_layer_norm.weight
lowerCAmelCase__ : Union[str, Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
lowerCAmelCase__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
lowerCAmelCase__ : int = roberta_layer.self_attn.q_proj.weight
lowerCAmelCase__ : Union[str, Any] = roberta_layer.self_attn.q_proj.bias
lowerCAmelCase__ : str = roberta_layer.self_attn.k_proj.weight
lowerCAmelCase__ : Tuple = roberta_layer.self_attn.k_proj.bias
lowerCAmelCase__ : List[str] = roberta_layer.self_attn.v_proj.weight
lowerCAmelCase__ : int = roberta_layer.self_attn.v_proj.bias
# self-attention output
lowerCAmelCase__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
lowerCAmelCase__ : Dict = roberta_layer.self_attn.out_proj.weight
lowerCAmelCase__ : Tuple = roberta_layer.self_attn.out_proj.bias
        # this one is the final layer norm
lowerCAmelCase__ : Tuple = roberta_layer.final_layer_norm.weight
lowerCAmelCase__ : Union[str, Any] = roberta_layer.final_layer_norm.bias
# intermediate
lowerCAmelCase__ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
lowerCAmelCase__ : Tuple = roberta_layer.fca.weight
lowerCAmelCase__ : Optional[int] = roberta_layer.fca.bias
# output
lowerCAmelCase__ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
lowerCAmelCase__ : str = roberta_layer.fca.weight
lowerCAmelCase__ : List[str] = roberta_layer.fca.bias
# end of layer
if classification_head:
lowerCAmelCase__ : List[str] = roberta.model.classification_heads['''mnli'''].dense.weight
lowerCAmelCase__ : Optional[int] = roberta.model.classification_heads['''mnli'''].dense.bias
lowerCAmelCase__ : List[str] = roberta.model.classification_heads['''mnli'''].out_proj.weight
lowerCAmelCase__ : int = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCAmelCase__ : Dict = roberta.model.encoder.lm_head.dense.weight
lowerCAmelCase__ : int = roberta.model.encoder.lm_head.dense.bias
lowerCAmelCase__ : Dict = roberta.model.encoder.lm_head.layer_norm.weight
lowerCAmelCase__ : Tuple = roberta.model.encoder.lm_head.layer_norm.bias
lowerCAmelCase__ : List[str] = roberta.model.encoder.lm_head.weight
lowerCAmelCase__ : Tuple = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCAmelCase__ : torch.Tensor = roberta.encode(UpperCamelCase ).unsqueeze(0 ) # batch of size 1
lowerCAmelCase__ : Optional[int] = model(UpperCamelCase )[0]
if classification_head:
lowerCAmelCase__ : int = roberta.model.classification_heads['''mnli'''](roberta.extract_features(UpperCamelCase ) )
else:
lowerCAmelCase__ : Optional[Any] = roberta.model(UpperCamelCase )[0]
print(our_output.shape , their_output.shape )
lowerCAmelCase__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCAmelCase__ : List[Any] = torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(UpperCamelCase ).mkdir(parents=UpperCamelCase , exist_ok=UpperCamelCase )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
lowerCAmelCase_ = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 678 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowerCAmelCase_ = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
@classmethod
def __magic_name__( cls ):
lowerCAmelCase__ : Dict = TOKEN
HfFolder.save_token(__UpperCAmelCase )
@classmethod
def __magic_name__( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCAmelCase__ : List[Any] = FlaxBertModel(__UpperCAmelCase )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
lowerCAmelCase__ : List[Any] = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
lowerCAmelCase__ : Union[str, Any] = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ : List[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ : str = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__UpperCAmelCase , repo_id='''test-model-flax''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCAmelCase__ : Optional[Any] = FlaxBertModel(__UpperCAmelCase )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
lowerCAmelCase__ : str = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ : Dict = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__UpperCAmelCase , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Any = flatten_dict(modela.params )
lowerCAmelCase__ : List[str] = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4:
lowerCAmelCase__ : Optional[Any] = False
return models_are_equal
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCAmelCase__ : List[str] = FlaxBertModel(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCAmelCase__ : Union[str, Any] = FlaxBertModel(__UpperCAmelCase )
lowerCAmelCase__ : Dict = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , max_shard_size='''10KB''' )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Any = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = '''bert'''
lowerCAmelCase__ : int = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Dict = FlaxBertModel.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = '''bert'''
lowerCAmelCase__ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
| 678 | 1 |
lowerCAmelCase_ = {str(digit): digit**5 for digit in range(10)}
def __lowerCAmelCase ( UpperCamelCase ) -> int:
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(UpperCamelCase ) )
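# --- Added illustration: 4150 is one of the sought numbers, since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
assert sum(DIGITS_FIFTH_POWER[digit] for digit in "4150") == 4150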
def __lowerCAmelCase ( ) -> int:
return sum(
number
for number in range(1000 , 1000000 )
if number == digits_fifth_powers_sum(UpperCamelCase ) )
if __name__ == "__main__":
print(solution())
| 678 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any:
lowerCAmelCase__ : Optional[Any] = 0
if start < end:
lowerCAmelCase__ : Union[str, Any] = randint(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : List[str] = a[end]
lowerCAmelCase__ : List[Any] = a[pivot]
lowerCAmelCase__ : str = temp
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = _in_place_partition(UpperCamelCase , UpperCamelCase , UpperCamelCase )
count += _in_place_quick_sort(UpperCamelCase , UpperCamelCase , p - 1 )
count += _in_place_quick_sort(UpperCamelCase , p + 1 , UpperCamelCase )
return count
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
lowerCAmelCase__ : List[str] = 0
lowerCAmelCase__ : List[Any] = randint(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : str = a[end]
lowerCAmelCase__ : Optional[int] = a[pivot]
lowerCAmelCase__ : List[str] = temp
lowerCAmelCase__ : str = start - 1
for index in range(UpperCamelCase , UpperCamelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowerCAmelCase__ : List[str] = new_pivot_index + 1
lowerCAmelCase__ : int = a[new_pivot_index]
lowerCAmelCase__ : int = a[index]
lowerCAmelCase__ : Tuple = temp
lowerCAmelCase__ : Optional[Any] = a[new_pivot_index + 1]
lowerCAmelCase__ : List[str] = a[end]
lowerCAmelCase__ : Union[str, Any] = temp
return new_pivot_index + 1, count
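# --- Added illustration: a self-contained (hypothetically named) sketch of
# the same comparison count without the in-place machinery, using the last
# element rather than a random pivot: each partition costs one comparison per
# element measured against the pivot.
def _count_quicksort_comparisons(arr):
    if len(arr) <= 1:
        return 0
    pivot, rest = arr[-1], arr[:-1]
    left = [x for x in rest if x < pivot]
    right = [x for x in rest if x >= pivot]
    return len(rest) + _count_quicksort_comparisons(left) + _count_quicksort_comparisons(right)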
lowerCAmelCase_ = TemporaryFile()
lowerCAmelCase_ = 1_00 # 100 elements are to be sorted
lowerCAmelCase_ , lowerCAmelCase_ = 0, 1 # mean and standard deviation
lowerCAmelCase_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
lowerCAmelCase_ = np.load(outfile)
lowerCAmelCase_ = len(M) - 1
lowerCAmelCase_ = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
| 678 | 1 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowerCAmelCase_ = logging.getLogger(__name__)
class _lowerCAmelCase ( _lowercase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ):
super().__init__(
__UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , index=__UpperCAmelCase , init_retrieval=__UpperCAmelCase , )
lowerCAmelCase__ : int = None
def __magic_name__( self , __UpperCAmelCase ):
logger.info('''initializing retrieval''' )
# initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations, while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('''dist initialized''' )
# needs to be set manually
lowerCAmelCase__ : Any = self._infer_socket_ifname()
# avoid clash with the NCCL port
lowerCAmelCase__ : Optional[Any] = str(distributed_port + 1 )
lowerCAmelCase__ : Any = dist.new_group(ranks=__UpperCAmelCase , backend='''gloo''' )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('''dist not initialized / main''' )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def __magic_name__( self ):
return dist.get_rank(group=self.process_group ) == 0
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=torch.floataa ):
lowerCAmelCase__ : Tuple = torch.empty(__UpperCAmelCase , dtype=__UpperCAmelCase )
dist.scatter(__UpperCAmelCase , src=0 , scatter_list=__UpperCAmelCase , group=self.process_group )
return target_tensor
def __magic_name__( self ):
lowerCAmelCase__ : str = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
lowerCAmelCase__ : List[Any] = next((addr for addr in addrs if addr.startswith('''e''' )) , __UpperCAmelCase )
return ifname
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ):
# single GPU training
if not dist.is_initialized():
lowerCAmelCase__ , lowerCAmelCase__ : str = self._main_retrieve(__UpperCAmelCase , __UpperCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__UpperCAmelCase )
# distributed training
lowerCAmelCase__ : Union[str, Any] = dist.get_world_size(group=self.process_group )
# gather logic
lowerCAmelCase__ : Tuple = None
if self._is_main():
lowerCAmelCase__ : List[Any] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__UpperCAmelCase )]
dist.gather(torch.tensor(__UpperCAmelCase ) , dst=0 , gather_list=__UpperCAmelCase , group=self.process_group )
# scatter logic
lowerCAmelCase__ : List[Any] = question_hidden_states.shape[0]
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : Union[str, Any] = []
if self._is_main():
assert len(__UpperCAmelCase ) == world_size
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self._main_retrieve(torch.cat(__UpperCAmelCase ).numpy() , __UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = torch.tensor(__UpperCAmelCase ), torch.tensor(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = self._chunk_tensor(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Any = self._chunk_tensor(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[Any] = self._scattered(__UpperCAmelCase , [n_queries, n_docs] , target_type=torch.intaa )
lowerCAmelCase__ : Any = self._scattered(__UpperCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__UpperCAmelCase )
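# --- Added illustration (not part of the class): the scatter pattern above in
# miniature. Only the source rank supplies `scatter_list`; every rank receives
# one chunk. Tensor shapes are assumptions for demonstration.
def _scatter_sketch(is_main, process_group, world_size):
    target = torch.empty(2, 3, dtype=torch.float32)
    chunks = [torch.randn(2, 3) for _ in range(world_size)] if is_main else None
    dist.scatter(target, src=0, scatter_list=chunks, group=process_group)
    return target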
| 678 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]:
assert isinstance(UpperCamelCase , UpperCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCAmelCase__ : List[str] = tmp_path / '''cache'''
lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase__ : List[Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_parquet_features( features , parquet_path , tmp_path ):
cache_dir = tmp_path / '''cache'''
default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
dataset = ParquetDatasetReader(parquet_path , features=features , cache_dir=cache_dir ).read()
_check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_parquet_split( split , parquet_path , tmp_path ):
cache_dir = tmp_path / '''cache'''
expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , split=split ).read()
_check_parquet_dataset(dataset , expected_features )
assert dataset.split == (split if split else "train")
@pytest.mark.parametrize('''path_type''' , [str, list] )
def test_dataset_from_parquet_path_type( path_type , parquet_path , tmp_path ):
if issubclass(path_type , str ):
path = parquet_path
elif issubclass(path_type , list ):
path = [parquet_path]
cache_dir = tmp_path / '''cache'''
expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
_check_parquet_dataset(dataset , expected_features )
def _check_parquet_datasetdict( dataset_dict , expected_features , splits=("train",) ):
assert isinstance(dataset_dict , DatasetDict )
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory( keep_in_memory , parquet_path , tmp_path ):
cache_dir = tmp_path / '''cache'''
expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
_check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_parquet_datasetdict_reader_features( features , parquet_path , tmp_path ):
cache_dir = tmp_path / '''cache'''
default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
dataset = ParquetDatasetReader({'''train''': parquet_path} , features=features , cache_dir=cache_dir ).read()
_check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_parquet_datasetdict_reader_split( split , parquet_path , tmp_path ):
if split:
path = {split: parquet_path}
else:
split = '''train'''
path = {'''train''': parquet_path, '''test''': parquet_path}
cache_dir = tmp_path / '''cache'''
expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
_check_parquet_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write( dataset , tmp_path ):
writer = ParquetDatasetWriter(dataset , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
pf = pq.ParquetFile(tmp_path / '''foo.parquet''' )
output_table = pf.read()
assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features( shared_datadir , tmp_path ):
image_path = str(shared_datadir / '''test_image_rgb.jpg''' )
data = {'''image''': [image_path]}
features = Features({'''image''': Image()} )
dataset = Dataset.from_dict(data , features=features )
writer = ParquetDatasetWriter(dataset , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
reloaded_dataset = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=True ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size( feature , expected ):
assert get_writer_batch_size(feature ) == expected
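# A minimal round-trip sketch of the reader/writer exercised by these tests;
# the column values are illustrative.
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

def parquet_roundtrip(tmp_path):
    ds = Dataset.from_dict(
        {"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4], "col_3": [1.0, 2.0, 3.0, 4.0]}
    )
    assert ParquetDatasetWriter(ds, tmp_path / "demo.parquet").write() > 0
    return ParquetDatasetReader(str(tmp_path / "demo.parquet")).read()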
| 678 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ):
lowerCAmelCase__ : List[str] = parent
lowerCAmelCase__ : Tuple = batch_size
lowerCAmelCase__ : Optional[Any] = seq_length
lowerCAmelCase__ : Any = is_training
lowerCAmelCase__ : List[str] = use_token_type_ids
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : Dict = vocab_size
lowerCAmelCase__ : Any = hidden_size
lowerCAmelCase__ : Dict = num_hidden_layers
lowerCAmelCase__ : int = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = intermediate_size
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : List[Any] = hidden_dropout_prob
lowerCAmelCase__ : Tuple = attention_probs_dropout_prob
lowerCAmelCase__ : Tuple = max_position_embeddings
lowerCAmelCase__ : Union[str, Any] = type_vocab_size
lowerCAmelCase__ : Optional[Any] = type_sequence_label_size
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : Optional[int] = num_labels
lowerCAmelCase__ : int = num_choices
lowerCAmelCase__ : Union[str, Any] = scope
lowerCAmelCase__ : Any = self.vocab_size - 1
def __magic_name__( self ):
lowerCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : int = None
if self.use_token_type_ids:
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Any = None
lowerCAmelCase__ : Any = None
lowerCAmelCase__ : Optional[Any] = None
if self.use_labels:
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : Union[str, Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCAmelCase__ : Any = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase ):
lowerCAmelCase__ : Any = OpenAIGPTModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : str = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , head_mask=__UpperCAmelCase )
lowerCAmelCase__ : Any = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ : Tuple = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase ):
lowerCAmelCase__ : Dict = OpenAIGPTLMHeadModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Any = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase ):
lowerCAmelCase__ : str = OpenAIGPTDoubleHeadsModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase ):
lowerCAmelCase__ : Any = self.num_labels
lowerCAmelCase__ : Optional[int] = OpenAIGPTForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : List[str] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self ):
lowerCAmelCase__ : str = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : List[Any] = config_and_inputs
lowerCAmelCase__ : Any = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
A__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
A__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
A__ = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
lowerCAmelCase__ : Any = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCAmelCase__ : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase , )
lowerCAmelCase__ : Tuple = inputs_dict['''labels''']
lowerCAmelCase__ : int = inputs_dict['''labels''']
lowerCAmelCase__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__UpperCAmelCase , )
lowerCAmelCase__ : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = OpenAIGPTModelTester(self )
lowerCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 )
def __magic_name__( self ):
self.config_tester.run_common_tests()
def __magic_name__( self ):
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__UpperCAmelCase )
@slow
def __magic_name__( self ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Optional[int] = OpenAIGPTModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(__UpperCAmelCase )
lowerCAmelCase__ : str = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__UpperCAmelCase ) # the president is
lowerCAmelCase__ : Optional[Any] = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCAmelCase__ : Optional[Any] = model.generate(__UpperCAmelCase , do_sample=False )  # greedy decoding, so the expected ids are deterministic
self.assertListEqual(output_ids[0].tolist() , __UpperCAmelCase )
| 678 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class _lowerCAmelCase ( _lowercase , _lowercase ):
A__ = 'focalnet'
def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=False , __UpperCAmelCase=[192, 384, 768, 768] , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[2, 2, 2, 2] , __UpperCAmelCase=[3, 3, 3, 3] , __UpperCAmelCase="gelu" , __UpperCAmelCase=4.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=False , __UpperCAmelCase=1e-4 , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Any = image_size
lowerCAmelCase__ : Any = patch_size
lowerCAmelCase__ : Tuple = num_channels
lowerCAmelCase__ : Dict = embed_dim
lowerCAmelCase__ : Optional[int] = use_conv_embed
lowerCAmelCase__ : Optional[int] = hidden_sizes
lowerCAmelCase__ : Optional[Any] = depths
lowerCAmelCase__ : Dict = focal_levels
lowerCAmelCase__ : int = focal_windows
lowerCAmelCase__ : Optional[Any] = hidden_act
lowerCAmelCase__ : Optional[int] = mlp_ratio
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : List[Any] = drop_path_rate
lowerCAmelCase__ : Tuple = use_layerscale
lowerCAmelCase__ : List[Any] = layerscale_value
lowerCAmelCase__ : Dict = use_post_layernorm
lowerCAmelCase__ : Dict = use_post_layernorm_in_modulation
lowerCAmelCase__ : Dict = normalize_modulator
lowerCAmelCase__ : Union[str, Any] = initializer_range
lowerCAmelCase__ : List[str] = layer_norm_eps
lowerCAmelCase__ : Tuple = encoder_stride
lowerCAmelCase__ : Dict = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
| 678 | 1 |
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution( n = N ) -> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
for i in range(len(n ) - 12 ) )
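# For comparison, an explicit sliding-window version of the same search; it is
# equivalent to the reduce-based expression above, just spelled out.
def solution_loop(n: str = N) -> int:
    best = 0
    for i in range(len(n) - 12):
        product = 1
        for digit in n[i : i + 13]:
            product *= int(digit)
        best = max(best, product)
    return best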
if __name__ == "__main__":
print(F"""{solution() = }""")
| 678 |
from scipy.stats import pearsonr
import datasets
lowerCAmelCase_ = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
lowerCAmelCase_ = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
lowerCAmelCase_ = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def _compute( self , predictions , references , return_pvalue=False ):
if return_pvalue:
results = pearsonr(references , predictions )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 678 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_longt5"] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_longt5"] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longt5 import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongT5EncoderModel,
LongT5ForConditionalGeneration,
LongT5Model,
LongT5PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longt5 import (
FlaxLongT5ForConditionalGeneration,
FlaxLongT5Model,
FlaxLongT5PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
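# A minimal sketch of the lazy-import idea behind _LazyModule: attribute access
# triggers the real submodule import on first use. The class and names here are
# illustrative, not the transformers implementation.
import importlib

class _LazySketch:
    def __init__(self, name, import_structure):
        self._name = name
        self._structure = import_structure

    def __getattr__(self, attr):
        for submodule, symbols in self._structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self._name}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self._name!r} has no attribute {attr!r}")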
| 678 |
from manim import *
class _lowerCAmelCase ( _lowercase ):
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase__ : Dict = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : List[str] = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : int = Text('''CPU''' , font_size=24 )
lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = [mem.copy() for i in range(4 )]
lowerCAmelCase__ : Tuple = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Tuple = Text('''GPU''' , font_size=24 )
lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Tuple = Text('''Model''' , font_size=24 )
lowerCAmelCase__ : List[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : Optional[Any] = []
for i, rect in enumerate(__UpperCAmelCase ):
rect.set_stroke(__UpperCAmelCase )
lowerCAmelCase__ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
self.add(__UpperCAmelCase )
model_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase )
lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Any = Text('''Loaded Checkpoint''' , font_size=24 )
lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : str = []
for i, rect in enumerate(__UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
target.move_to(__UpperCAmelCase )
ckpt_arr.append(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase__ : List[Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[str] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : str = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
lowerCAmelCase__ : Optional[Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase__ : Dict = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : str = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : List[str] = Text('''Disk''' , font_size=24 )
lowerCAmelCase__ : Any = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
lowerCAmelCase__ : str = []
for i, rect in enumerate(__UpperCAmelCase ):
lowerCAmelCase__ : Dict = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
self.play(*__UpperCAmelCase )
self.play(FadeOut(__UpperCAmelCase ) )
lowerCAmelCase__ : int = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) )
self.play(
FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , )
self.wait()
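# A scene like the one above is rendered from the command line, e.g. (flags
# illustrative): manim -pql this_file.py <SceneClassName>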
| 678 | 1 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class _lowerCAmelCase ( _lowercase ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
warnings.warn(
'''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use SegformerImageProcessor instead.''' , FutureWarning , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
| 678 |
import collections
import os
import re
from pathlib import Path
lowerCAmelCase_ = """src/transformers"""
# Matches is_xxx_available()
lowerCAmelCase_ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase_ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase_ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCAmelCase_ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase_ = re.compile(R"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase_ = re.compile(R"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCAmelCase_ = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowerCAmelCase_ = re.compile(R"""^\s*else:""")
def find_backend( UpperCamelCase ):
if _re_test_backend.search(UpperCamelCase ) is None:
return None
backends = [b[0] for b in _re_backend.findall(UpperCamelCase )]
backends.sort()
return "_and_".join(backends )
def parse_init( UpperCamelCase ):
with open(UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCAmelCase__ : Union[str, Any] = f.readlines()
lowerCAmelCase__ : Tuple = 0
while line_index < len(UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(UpperCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase__ : List[str] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase__ : str = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(UpperCamelCase ):
lowerCAmelCase__ : str = _re_one_line_import_struct.search(UpperCamelCase ).groups()[0]
lowerCAmelCase__ : Optional[Any] = re.findall(R'''\[([^\]]+)\]''' , UpperCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
lowerCAmelCase__ : Tuple = _re_import_struct_key_value.search(UpperCamelCase )
if single_line_import_search is not None:
lowerCAmelCase__ : Optional[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(UpperCamelCase ) > 0]
objects.extend(UpperCamelCase )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase__ : Any = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase__ : List[str] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ : Union[str, Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ : Optional[int] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
lowerCAmelCase__ : str = lines[line_index]
if _re_import_struct_add_one.search(UpperCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(UpperCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(UpperCamelCase ) is not None:
lowerCAmelCase__ : Optional[int] = _re_import_struct_add_many.search(UpperCamelCase ).groups()[0].split(''', ''' )
lowerCAmelCase__ : List[Any] = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0]
objects.extend(UpperCamelCase )
elif _re_between_brackets.search(UpperCamelCase ) is not None:
lowerCAmelCase__ : List[Any] = _re_between_brackets.search(UpperCamelCase ).groups()[0].split(''', ''' )
lowerCAmelCase__ : Optional[int] = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0]
objects.extend(UpperCamelCase )
elif _re_quote_object.search(UpperCamelCase ) is not None:
objects.append(_re_quote_object.search(UpperCamelCase ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
lowerCAmelCase__ : Optional[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase__ : Any = []
while (
line_index < len(UpperCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
lowerCAmelCase__ : Tuple = lines[line_index]
lowerCAmelCase__ : List[Any] = _re_import.search(UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase__ : Dict = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(UpperCamelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase__ : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ : Tuple = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
lowerCAmelCase__ : Any = lines[line_index]
lowerCAmelCase__ : Union[str, Any] = _re_import.search(UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCAmelCase__ : str = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
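# parse_init thus returns two dicts keyed by backend ("none", "torch", ...):
# one built from _import_structure and one from the TYPE_CHECKING imports.
# analyze_results below diffs the two sides and reports any mismatch.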
def analyze_results( import_dict_objects , type_hint_objects ):
def find_duplicates(UpperCamelCase ):
return [k for k, v in collections.Counter(UpperCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
errors = []
for key in import_dict_objects.keys():
duplicate_imports = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
duplicate_type_hints = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
name = '''base imports''' if key == '''none''' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def check_all_inits():
failures = []
for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
if "__init__.py" in files:
fname = os.path.join(root , '''__init__.py''' )
objects = parse_init(fname )
if objects is not None:
errors = analyze_results(*objects )
if len(errors ) > 0:
errors[0] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(errors ) )
if len(failures ) > 0:
raise ValueError('''\n\n'''.join(failures ) )
def get_transformers_submodules():
submodules = []
for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(folder )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(path ) / folder).glob('''*.py''' ) ) ) == 0:
continue
short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
submodule = short_path.replace(os.path.sep , '''.''' )
submodules.append(submodule )
for fname in files:
if fname == "__init__.py":
continue
short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
submodule = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(submodule )
return submodules
IGNORE_SUBMODULES = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def check_submodules():
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS )
import_structure_keys = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentially re-) add them.
with open(os.path.join(PATH_TO_TRANSFORMERS , '''__init__.py''' ) , '''r''' ) as f:
init_content = f.read()
import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , init_content ) ) )
module_not_registered = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(module_not_registered ) > 0:
list_of_modules = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registed in the main init of Transformers:\n'''
F"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
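# Both checks are meant to be run from the repository root, e.g. (invocation
# illustrative): python utils/check_inits.py
# On failure they raise ValueError listing the offending modules or objects.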
| 678 | 1 |
def __lowerCAmelCase ( UpperCamelCase ) -> int:
if UpperCamelCase < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(UpperCamelCase , float ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(UpperCamelCase ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
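# Example: bin(25) == "0b11001", which contains three "1" characters, so the
# function reports 3 set bits for 25.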
| 678 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class _lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=64 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ):
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : Tuple = batch_size
lowerCAmelCase__ : Union[str, Any] = seq_length
lowerCAmelCase__ : str = is_training
lowerCAmelCase__ : Union[str, Any] = use_input_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : int = use_labels
lowerCAmelCase__ : List[Any] = vocab_size
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : List[str] = embedding_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : Optional[int] = num_attention_heads
lowerCAmelCase__ : List[str] = intermediate_size
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = max_position_embeddings
lowerCAmelCase__ : List[Any] = type_vocab_size
lowerCAmelCase__ : Optional[Any] = type_sequence_label_size
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : Optional[Any] = num_labels
lowerCAmelCase__ : List[str] = num_choices
lowerCAmelCase__ : Any = scope
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : str = None
if self.use_input_mask:
lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : Optional[Any] = None
if self.use_token_type_ids:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Optional[int] = None
if self.use_labels:
lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__( self ):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = MegatronBertModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = MegatronBertForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = MegatronBertForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = MegatronBertForNextSentencePrediction(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = MegatronBertForPreTraining(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : int = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , next_sentence_label=__UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = MegatronBertForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.num_labels
lowerCAmelCase__ : Union[str, Any] = MegatronBertForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.num_labels
lowerCAmelCase__ : str = MegatronBertForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.num_choices
lowerCAmelCase__ : Dict = MegatronBertForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : Any = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : Optional[int] = config_and_inputs
lowerCAmelCase__ : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _lowercase , _lowercase , unittest.TestCase ):
A__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
A__ = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ = True
# test_resize_embeddings = False
A__ = False
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
lowerCAmelCase__ : List[Any] = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class in get_values(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def __magic_name__( self ):
lowerCAmelCase__ : str = MegatronBertModelTester(self )
lowerCAmelCase__ : Dict = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def __magic_name__( self ):
self.config_tester.run_common_tests()
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__UpperCAmelCase )
def __lowerCAmelCase ( UpperCamelCase ) -> Optional[int]:
return torch.tensor(
UpperCamelCase , dtype=torch.long , device=UpperCamelCase , )
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
def __magic_name__( self ):
lowerCAmelCase__ : int = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
lowerCAmelCase__ : Union[str, Any] = os.path.join(os.environ['''MYDIR'''] , __UpperCAmelCase )
lowerCAmelCase__ : Tuple = MegatronBertModel.from_pretrained(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.half()
lowerCAmelCase__ : Optional[int] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase )[0]
lowerCAmelCase__ : List[Any] = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , __UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
lowerCAmelCase__ : Union[str, Any] = output[0, ii, jj]
lowerCAmelCase__ : Optional[Any] = expected[3 * ii + jj]
lowerCAmelCase__ : List[str] = '''ii={} jj={} a={} b={}'''.format(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertTrue(math.isclose(__UpperCAmelCase , __UpperCAmelCase , rel_tol=__UpperCAmelCase , abs_tol=__UpperCAmelCase ) , msg=__UpperCAmelCase )
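# The integration test above compares a 3x3 slice of the half-precision model
# output against hard-coded expected values with math.isclose. A de-obfuscated,
# self-contained sketch of that comparison pattern (the tensor here is built
# from the expected values themselves, purely for illustration):
import math
import torch
TOLERANCE = 1e-4
expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
output = torch.tensor(expected).reshape(1, 3, 3)
for ii in range(3):
    for jj in range(3):
        a, b = output[0, ii, jj].item(), expected[3 * ii + jj]
        msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
        assert math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg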
| 678 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = inspect.getfile(accelerate.test_utils )
lowerCAmelCase__ : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
lowerCAmelCase__ : List[str] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
lowerCAmelCase__ : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def __magic_name__( self ):
print(f"""Found {torch.cuda.device_count()} devices.""" )
lowerCAmelCase__ : Any = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
@require_multi_gpu
def __magic_name__( self ):
print(f"""Found {torch.cuda.device_count()} devices.""" )
lowerCAmelCase__ : int = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(f"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
@require_multi_gpu
def __magic_name__( self ):
lowerCAmelCase__ : int = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
@require_multi_gpu
def __magic_name__( self ):
print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
lowerCAmelCase__ : List[str] = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ = (accelerator.state.process_index + 2, 10)
lowerCAmelCase_ = torch.randint(0, 10, shape).to(accelerator.device)
lowerCAmelCase_ = """"""
lowerCAmelCase_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
lowerCAmelCase_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
lowerCAmelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
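# pad_across_processes above pads each process's tensor along dim 0 up to the
# largest size found on any process. A single-process sketch of the padding rule
# itself (hypothetical helper, not the accelerate API):
import torch
def pad_to_length(tensor, length, pad_index=0, pad_first=False):
    # Pad dim 0 with pad_index until the tensor has `length` rows.
    pad_rows = length - tensor.shape[0]
    if pad_rows <= 0:
        return tensor
    pad = torch.full((pad_rows,) + tuple(tensor.shape[1:]), pad_index, dtype=tensor.dtype)
    return torch.cat([pad, tensor] if pad_first else [tensor, pad], dim=0)
t = torch.randint(0, 10, (3, 10))
padded = pad_to_length(t, 5)
assert padded.shape[0] == 5
assert torch.all(padded[3:] == 0)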
| 678 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'bart'
A__ = ['past_key_values']
A__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , __UpperCAmelCase=5_0265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
lowerCAmelCase__ : Union[str, Any] = vocab_size
lowerCAmelCase__ : Optional[Any] = max_position_embeddings
lowerCAmelCase__ : int = d_model
lowerCAmelCase__ : str = encoder_ffn_dim
lowerCAmelCase__ : Any = encoder_layers
lowerCAmelCase__ : Dict = encoder_attention_heads
lowerCAmelCase__ : Union[str, Any] = decoder_ffn_dim
lowerCAmelCase__ : Union[str, Any] = decoder_layers
lowerCAmelCase__ : Any = decoder_attention_heads
lowerCAmelCase__ : Tuple = dropout
lowerCAmelCase__ : Any = attention_dropout
lowerCAmelCase__ : Any = activation_dropout
lowerCAmelCase__ : Optional[Any] = activation_function
lowerCAmelCase__ : Union[str, Any] = init_std
lowerCAmelCase__ : Union[str, Any] = encoder_layerdrop
lowerCAmelCase__ : int = decoder_layerdrop
lowerCAmelCase__ : Optional[int] = classifier_dropout
lowerCAmelCase__ : str = use_cache
lowerCAmelCase__ : int = encoder_layers
lowerCAmelCase__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ):
lowerCAmelCase__ : str = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
'''The config can simply be saved and uploaded again to be fixed.''' )
class _lowerCAmelCase ( _lowercase ):
@property
def __magic_name__( self ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCAmelCase__ : Any = {0: '''batch'''}
lowerCAmelCase__ : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowerCAmelCase__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''}
lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCAmelCase__ : List[str] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_layers
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCAmelCase__ : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
lowerCAmelCase__ : List[str] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __magic_name__( self ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Dict = super().outputs
else:
lowerCAmelCase__ : Any = super(__UpperCAmelCase , self ).outputs
if self.use_past:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.num_layers
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
lowerCAmelCase__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Generate decoder inputs
lowerCAmelCase__ : int = seq_length if not self.use_past else 1
lowerCAmelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
lowerCAmelCase__ : str = dict(**__UpperCAmelCase , **__UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = common_inputs['''input_ids'''].shape
lowerCAmelCase__ : List[str] = common_inputs['''decoder_input_ids'''].shape[1]
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.num_attention_heads
lowerCAmelCase__ : Any = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase__ : List[Any] = decoder_seq_length + 3
lowerCAmelCase__ : Any = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCAmelCase__ : Any = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 )
lowerCAmelCase__ : List[str] = []
# If both the encoder and decoder layer counts are present in the model configuration, both are considered
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.num_layers
lowerCAmelCase__ : Union[str, Any] = min(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Any = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers
lowerCAmelCase__ : Dict = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__UpperCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
) )
# TODO: test this.
lowerCAmelCase__ : Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__UpperCAmelCase , __UpperCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) )
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
lowerCAmelCase__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCAmelCase__ , lowerCAmelCase__ : Any = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowerCAmelCase__ : List[str] = seqlen + 2
lowerCAmelCase__ , lowerCAmelCase__ : int = self.num_layers
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_attention_heads
lowerCAmelCase__ : Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase__ : Optional[Any] = common_inputs['''attention_mask'''].dtype
lowerCAmelCase__ : List[Any] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
lowerCAmelCase__ : List[Any] = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase )
]
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase__ : Tuple = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase__ : Optional[Any] = tokenizer.num_special_tokens_to_add(__UpperCAmelCase )
lowerCAmelCase__ : int = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase__ : Optional[int] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCAmelCase__ : str = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) )
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
elif self.task == "causal-lm":
lowerCAmelCase__ : List[str] = self._generate_dummy_inputs_for_causal_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
else:
lowerCAmelCase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Any = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
lowerCAmelCase__ : Dict = super(__UpperCAmelCase , self )._flatten_past_key_values_(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
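# The dummy past_key_values built above follow the standard cache shape
# (batch, num_heads, past_sequence_length, hidden_size // num_heads).
# A quick sanity sketch with illustrative BART-base-like numbers:
import torch
batch, num_heads, past_len, d_model, num_layers = 2, 12, 10, 768, 6
shape = (batch, num_heads, past_len, d_model // num_heads)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(num_layers)]
assert past_key_values[0][0].shape == (2, 12, 10, 64)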
| 678 | 1 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
# Initialise PyTorch model
lowerCAmelCase__ : Any = AlbertConfig.from_json_file(UpperCamelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
lowerCAmelCase__ : Union[str, Any] = AlbertForPreTraining(UpperCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_albert(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , UpperCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
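# Example invocation (script name and all file paths are illustrative):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin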
| 678 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'sew-d'
def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase=2 , __UpperCAmelCase=512 , __UpperCAmelCase=256 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=("p2c", "c2p") , __UpperCAmelCase="layer_norm" , __UpperCAmelCase="gelu_python" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __UpperCAmelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : Optional[int] = feat_extract_norm
lowerCAmelCase__ : str = feat_extract_activation
lowerCAmelCase__ : int = list(__UpperCAmelCase )
lowerCAmelCase__ : int = list(__UpperCAmelCase )
lowerCAmelCase__ : Any = list(__UpperCAmelCase )
lowerCAmelCase__ : int = conv_bias
lowerCAmelCase__ : List[Any] = num_conv_pos_embeddings
lowerCAmelCase__ : Optional[int] = num_conv_pos_embedding_groups
lowerCAmelCase__ : int = len(self.conv_dim )
lowerCAmelCase__ : Union[str, Any] = num_hidden_layers
lowerCAmelCase__ : Any = intermediate_size
lowerCAmelCase__ : int = squeeze_factor
lowerCAmelCase__ : int = max_position_embeddings
lowerCAmelCase__ : Any = position_buckets
lowerCAmelCase__ : Optional[int] = share_att_key
lowerCAmelCase__ : Tuple = relative_attention
lowerCAmelCase__ : Optional[int] = norm_rel_ebd
lowerCAmelCase__ : Tuple = list(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = hidden_act
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : Optional[int] = hidden_dropout
lowerCAmelCase__ : Union[str, Any] = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : List[Any] = feat_proj_dropout
lowerCAmelCase__ : Any = final_dropout
lowerCAmelCase__ : Optional[int] = layer_norm_eps
lowerCAmelCase__ : List[str] = feature_layer_norm_eps
lowerCAmelCase__ : Tuple = initializer_range
lowerCAmelCase__ : Tuple = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. '''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '''
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride) """
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Tuple = apply_spec_augment
lowerCAmelCase__ : List[str] = mask_time_prob
lowerCAmelCase__ : int = mask_time_length
lowerCAmelCase__ : int = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : int = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
# ctc loss
lowerCAmelCase__ : Optional[Any] = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# sequence classification
lowerCAmelCase__ : Tuple = use_weighted_layer_sum
lowerCAmelCase__ : Dict = classifier_proj_size
@property
def __magic_name__( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
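# The property above multiplies all feature-extractor strides together, i.e. the
# number of raw input samples per output frame. Standalone equivalent using the
# default conv_stride from this config:
import functools
import operator
conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
ratio = functools.reduce(operator.mul, conv_stride, 1)
assert ratio == 320  # 5 * 2**6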
| 678 | 1 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]:
assert isinstance(UpperCamelCase , UpperCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCAmelCase__ : List[str] = tmp_path / '''cache'''
lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase__ : List[Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCAmelCase__ : str = tmp_path / '''cache'''
lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : str = features.copy() if features else default_expected_features
lowerCAmelCase__ : List[Any] = (
Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
lowerCAmelCase__ : str = tmp_path / '''cache'''
lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , split=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
if issubclass(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : Any = parquet_path
elif issubclass(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : Any = [parquet_path]
lowerCAmelCase__ : int = tmp_path / '''cache'''
lowerCAmelCase__ : str = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=("train",) ) -> str:
assert isinstance(UpperCamelCase , UpperCamelCase )
for split in splits:
lowerCAmelCase__ : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]:
lowerCAmelCase__ : Any = tmp_path / '''cache'''
lowerCAmelCase__ : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase__ : Optional[Any] = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read()
_check_parquet_datasetdict(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
lowerCAmelCase__ : Any = tmp_path / '''cache'''
lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : Tuple = features.copy() if features else default_expected_features
lowerCAmelCase__ : Optional[int] = (
Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase__ : List[str] = ParquetDatasetReader({'''train''': parquet_path} , features=UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_datasetdict(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
if split:
lowerCAmelCase__ : Tuple = {split: parquet_path}
else:
lowerCAmelCase__ : int = '''train'''
lowerCAmelCase__ : List[Any] = {'''train''': parquet_path, '''test''': parquet_path}
lowerCAmelCase__ : Optional[int] = tmp_path / '''cache'''
lowerCAmelCase__ : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : List[str] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_datasetdict(UpperCamelCase , UpperCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCAmelCase__ : Optional[Any] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
lowerCAmelCase__ : Union[str, Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' )
lowerCAmelCase__ : int = pf.read()
assert dataset.data.table == output_table
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCAmelCase__ : List[str] = str(shared_datadir / '''test_image_rgb.jpg''' )
lowerCAmelCase__ : Dict = {'''image''': [image_path]}
lowerCAmelCase__ : int = Features({'''image''': Image()} )
lowerCAmelCase__ : Dict = Dataset.from_dict(UpperCamelCase , features=UpperCamelCase )
lowerCAmelCase__ : List[str] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
lowerCAmelCase__ : Dict = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
lowerCAmelCase__ : int = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=UpperCamelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Any:
assert get_writer_batch_size(UpperCamelCase ) == expected
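# A minimal public-API round trip of the same functionality the reader/writer
# tests above exercise (sketch; the file path is illustrative):
from datasets import Dataset
ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0]})
ds.to_parquet("tmp.parquet")
reloaded = Dataset.from_parquet("tmp.parquet")
assert reloaded.column_names == ["col_1", "col_2", "col_3"]
assert reloaded.num_rows == 4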
| 678 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( _lowercase , unittest.TestCase ):
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def __magic_name__( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ : Union[str, Any] = PegasusTokenizer(__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __magic_name__( self ):
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def __magic_name__( self , **__UpperCAmelCase ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase ):
return ("This is a test", "This is a test")
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = '''</s>'''
lowerCAmelCase__ : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(__UpperCAmelCase ) , 1103 )
def __magic_name__( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : int = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowerCAmelCase__ : Any = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
lowerCAmelCase__ : Dict = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Any = self._large_tokenizer
# <mask_1> masks a whole sentence while <mask_2> masks a single word
lowerCAmelCase__ : List[str] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowerCAmelCase__ : Tuple = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase__ : Tuple = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase__ : str = '''To ensure a smooth flow of bank resolutions.'''
lowerCAmelCase__ : int = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase__ : List[Any] = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = ['''This is going to be way too long.''' * 150, '''short example''']
lowerCAmelCase__ : List[str] = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
lowerCAmelCase__ : Optional[int] = self._large_tokenizer(
text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def __magic_name__( self ):
# fmt: off
lowerCAmelCase__ : Optional[int] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( _lowercase , unittest.TestCase ):
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def __magic_name__( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ : List[Any] = PegasusTokenizer(__UpperCAmelCase , offset=0 , mask_token_sent=__UpperCAmelCase , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __magic_name__( self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def __magic_name__( self , **__UpperCAmelCase ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase ):
return ("This is a test", "This is a test")
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : str = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowerCAmelCase__ : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
lowerCAmelCase__ : int = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = ['''This is going to be way too long.''' * 1000, '''short example''']
lowerCAmelCase__ : int = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
lowerCAmelCase__ : Tuple = self._large_tokenizer(
text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask.
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowerCAmelCase__ : Union[str, Any] = self._large_tokenizer(__UpperCAmelCase ).input_ids
self.assertListEqual(
__UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
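# The asserts above pin down the Pegasus id layout: <pad>=0, </s>=1, the other
# reserved ids run up to the offset, the <unk> id works out to offset + 2, and
# SentencePiece ids are shifted up by the offset. A sketch of that arithmetic
# (the underlying sp model size of 96_000 is inferred from vocab_size - offset,
# not read from the model):
offset = 103
pad_token_id, eos_token_id = 0, 1
unk_token_id = offset + 2
assert unk_token_id == 105
assert 96_000 + offset == 96_103  # sp model size + offset == tokenizer.vocab_size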
| 678 | 1 |
lowerCAmelCase_ = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
lowerCAmelCase_ = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float:
lowerCAmelCase__ : List[str] = from_type.lower().strip('''s''' )
lowerCAmelCase__ : List[str] = to_type.lower().strip('''s''' )
lowerCAmelCase__ : str = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Any = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase )
if from_sanitized not in METRIC_CONVERSION:
lowerCAmelCase__ : Tuple = (
F"""Invalid 'from_type' value: {from_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}"""
)
raise ValueError(UpperCamelCase )
if to_sanitized not in METRIC_CONVERSION:
lowerCAmelCase__ : List[Any] = (
F"""Invalid 'to_type' value: {to_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}"""
)
raise ValueError(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[from_sanitized]
lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[to_sanitized]
lowerCAmelCase__ : int = 1
if from_exponent > to_exponent:
lowerCAmelCase__ : List[str] = from_exponent - to_exponent
else:
lowerCAmelCase__ : Dict = -(to_exponent - from_exponent)
return value * pow(10 , UpperCamelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
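# Worked example of the exponent arithmetic above: 4 kilometers to meters uses
# exponents 3 and 0, so the result is 4 * 10**(3 - 0) = 4000.0 (a self-contained
# restatement; the conversion function above implements the same rule):
exponents = {"m": 0, "km": 3}
value, from_exp, to_exp = 4, exponents["km"], exponents["m"]
assert value * pow(10, from_exp - to_exp) == 4000.0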
| 678 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'donut-swin'
A__ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[3, 6, 12, 24] , __UpperCAmelCase=7 , __UpperCAmelCase=4.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = image_size
lowerCAmelCase__ : List[str] = patch_size
lowerCAmelCase__ : int = num_channels
lowerCAmelCase__ : Optional[Any] = embed_dim
lowerCAmelCase__ : int = depths
lowerCAmelCase__ : Dict = len(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = num_heads
lowerCAmelCase__ : Dict = window_size
lowerCAmelCase__ : str = mlp_ratio
lowerCAmelCase__ : Optional[int] = qkv_bias
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase__ : List[str] = drop_path_rate
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : List[str] = use_absolute_embeddings
lowerCAmelCase__ : Dict = layer_norm_eps
lowerCAmelCase__ : Any = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase__ : List[Any] = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
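# The last line above computes hidden_size as embed_dim * 2**(num_stages - 1),
# since each Swin stage doubles the channel dimension. With the defaults:
embed_dim, depths = 96, [2, 2, 6, 2]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 768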
| 678 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCAmelCase ( UpperCamelCase=None ) -> Dict:
if subparsers is not None:
lowerCAmelCase__ : int = subparsers.add_parser('''test''' )
else:
lowerCAmelCase__ : Any = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''' , default=UpperCamelCase , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase )
return parser
def __lowerCAmelCase ( UpperCamelCase ) -> Optional[int]:
lowerCAmelCase__ : Tuple = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
if args.config_file is None:
lowerCAmelCase__ : Optional[int] = script_name
else:
lowerCAmelCase__ : Union[str, Any] = F"""--config_file={args.config_file} {script_name}"""
lowerCAmelCase__ : str = ['''accelerate-launch'''] + test_args.split()
lowerCAmelCase__ : Optional[Any] = execute_subprocess_async(UpperCamelCase , env=os.environ.copy() )
if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
def __lowerCAmelCase ( ) -> Any:
lowerCAmelCase__ : Tuple = test_command_parser()
lowerCAmelCase__ : List[str] = parser.parse_args()
test_command(UpperCamelCase )
if __name__ == "__main__":
main()
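# Example CLI usage once registered as the `accelerate test` subcommand
# (the config path is illustrative):
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml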
| 678 |
lowerCAmelCase_ = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
lowerCAmelCase_ = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float:
lowerCAmelCase__ : List[str] = from_type.lower().strip('''s''' )
lowerCAmelCase__ : List[str] = to_type.lower().strip('''s''' )
lowerCAmelCase__ : str = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Any = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase )
if from_sanitized not in METRIC_CONVERSION:
lowerCAmelCase__ : Tuple = (
F"""Invalid 'from_type' value: {from_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}"""
)
raise ValueError(UpperCamelCase )
if to_sanitized not in METRIC_CONVERSION:
lowerCAmelCase__ : List[Any] = (
F"""Invalid 'to_type' value: {to_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}"""
)
raise ValueError(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[from_sanitized]
lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[to_sanitized]
lowerCAmelCase__ : int = 1
if from_exponent > to_exponent:
lowerCAmelCase__ : List[str] = from_exponent - to_exponent
else:
lowerCAmelCase__ : Dict = -(to_exponent - from_exponent)
return value * pow(10 , UpperCamelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 678 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class _lowerCAmelCase ( _lowercase , _lowercase ):
A__ = 'focalnet'
def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=False , __UpperCAmelCase=[192, 384, 768, 768] , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[2, 2, 2, 2] , __UpperCAmelCase=[3, 3, 3, 3] , __UpperCAmelCase="gelu" , __UpperCAmelCase=4.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=False , __UpperCAmelCase=1e-4 , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Any = image_size
lowerCAmelCase__ : Any = patch_size
lowerCAmelCase__ : Tuple = num_channels
lowerCAmelCase__ : Dict = embed_dim
lowerCAmelCase__ : Optional[int] = use_conv_embed
lowerCAmelCase__ : Optional[int] = hidden_sizes
lowerCAmelCase__ : Optional[Any] = depths
lowerCAmelCase__ : Dict = focal_levels
lowerCAmelCase__ : int = focal_windows
lowerCAmelCase__ : Optional[Any] = hidden_act
lowerCAmelCase__ : Optional[int] = mlp_ratio
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : List[Any] = drop_path_rate
lowerCAmelCase__ : Tuple = use_layerscale
lowerCAmelCase__ : List[Any] = layerscale_value
lowerCAmelCase__ : Dict = use_post_layernorm
lowerCAmelCase__ : Dict = use_post_layernorm_in_modulation
lowerCAmelCase__ : Dict = normalize_modulator
lowerCAmelCase__ : Union[str, Any] = initializer_range
lowerCAmelCase__ : List[str] = layer_norm_eps
lowerCAmelCase__ : Tuple = encoder_stride
lowerCAmelCase__ : Dict = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
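# stage_names above enumerates the backbone stages; with the default depths it is
# ["stem", "stage1", "stage2", "stage3", "stage4"]:
depths = [2, 2, 6, 2]
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
assert stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]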
| 678 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def __magic_name__( *__UpperCAmelCase , **__UpperCAmelCase ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : int = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : List[str] = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating-point scores are so close that we run into floating-point error, so the order is not
# guaranteed across Python and Torch versions.
self.assertIn(
nested_simplify(__UpperCAmelCase ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
] , )
@require_tf
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
] , )
@slow
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCAmelCase__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
lowerCAmelCase__ : Tuple = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCAmelCase__ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : Union[str, Any] = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
lowerCAmelCase__ : Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
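# Minimal inference usage mirroring the slow tests above (model name taken from
# the tests; the image path is illustrative):
from transformers import pipeline
classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier("./tests/fixtures/tests_samples/COCO/000000039769.png", candidate_labels=["cat", "plane", "remote"])
# preds is a list of {"score": float, "label": str} dicts sorted by descending score.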
| 678 | 1 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __lowerCAmelCase ( UpperCamelCase ) -> List[Any]:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
super().__init__()
lowerCAmelCase__ : str = module
lowerCAmelCase__ : List[Any] = nn.Sequential(
nn.Linear(module.in_features , __UpperCAmelCase , bias=__UpperCAmelCase ) , nn.Linear(__UpperCAmelCase , module.out_features , bias=__UpperCAmelCase ) , )
lowerCAmelCase__ : Union[str, Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=__UpperCAmelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def __magic_name__( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ):
return self.module(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) + self.adapter(__UpperCAmelCase )
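# A minimal, self-contained sketch (my addition, with illustrative names) of the
# adapter pattern implemented by the module above: a frozen linear layer plus a
# low-rank bottleneck whose output is added residually to the frozen output.
def _adapter_sketch():
    import torch
    import torch.nn as nn

    base = nn.Linear(16, 16)
    base.weight.requires_grad_(False)  # freeze the "pretrained" weight
    rank = 4
    adapter = nn.Sequential(nn.Linear(16, rank, bias=False), nn.Linear(rank, 16, bias=False))
    nn.init.zeros_(adapter[1].weight)  # the residual branch starts as a no-op
    x = torch.randn(2, 16)
    return base(x) + adapter(x)  # same forward rule as the module above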
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (i.e. >1b parameters), otherwise the quantization may not work as expected.
    # Therefore we use only bloom-1b7 here to test our module.
A__ = 'bigscience/bloom-1b7'
# Constant values
A__ = 2.109659552692574
A__ = 'Hello my name is'
A__ = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
A__ = 10
def __magic_name__( self ):
# Models and tokenizer
lowerCAmelCase__ : str = AutoTokenizer.from_pretrained(self.model_name )
class _lowerCAmelCase ( _lowercase ):
def __magic_name__( self ):
super().setUp()
# Models and tokenizer
lowerCAmelCase__ : int = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
lowerCAmelCase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
def __magic_name__( self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self.model_abit.config
self.assertTrue(hasattr(__UpperCAmelCase , '''quantization_config''' ) )
lowerCAmelCase__ : List[str] = config.to_dict()
lowerCAmelCase__ : int = config.to_diff_dict()
lowerCAmelCase__ : Optional[Any] = config.to_json_string()
def __magic_name__( self ):
from bitsandbytes.nn import Paramsabit
lowerCAmelCase__ : Optional[Any] = self.model_fpaa.get_memory_footprint()
lowerCAmelCase__ : Optional[Any] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCAmelCase__ : List[Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __magic_name__( self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCAmelCase__ : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def __magic_name__( self ):
lowerCAmelCase__ : Dict = BitsAndBytesConfig()
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : List[str] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__UpperCAmelCase , device_map='''auto''' )
lowerCAmelCase__ : int = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCAmelCase__ : List[Any] = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def __magic_name__( self ):
with self.assertRaises(__UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = BitsAndBytesConfig()
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Any = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def __magic_name__( self ):
with self.assertRaises(__UpperCAmelCase ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowerCAmelCase__ : List[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCAmelCase__ : int = self.model_fpaa.to(torch.floataa )
lowerCAmelCase__ : Optional[int] = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
lowerCAmelCase__ : int = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCAmelCase__ : Union[str, Any] = self.model_fpaa.half()
# Check this does not throw an error
lowerCAmelCase__ : int = self.model_fpaa.float()
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
@classmethod
def __magic_name__( cls ):
lowerCAmelCase__ : str = '''t5-small'''
lowerCAmelCase__ : Optional[int] = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCAmelCase__ : List[str] = AutoTokenizer.from_pretrained(cls.model_name )
lowerCAmelCase__ : List[Any] = '''Translate in German: Hello, my dog is cute'''
def __magic_name__( self ):
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self ):
from transformers import TaForConditionalGeneration
lowerCAmelCase__ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCAmelCase__ : Any = None
# test with `t5-small`
lowerCAmelCase__ : str = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
lowerCAmelCase__ : Optional[int] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCAmelCase__ : Dict = model.generate(**__UpperCAmelCase )
# test with `flan-t5-small`
lowerCAmelCase__ : Dict = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
lowerCAmelCase__ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCAmelCase__ : Optional[Any] = model.generate(**__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = modules
def __magic_name__( self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCAmelCase__ : Optional[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCAmelCase__ : Tuple = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCAmelCase__ : List[str] = model.generate(**__UpperCAmelCase )
# test with `flan-t5-small`
lowerCAmelCase__ : Union[str, Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
lowerCAmelCase__ : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCAmelCase__ : str = model.generate(**__UpperCAmelCase )
class _lowerCAmelCase ( _lowercase ):
def __magic_name__( self ):
super().setUp()
# model_name
lowerCAmelCase__ : Optional[Any] = '''bigscience/bloom-560m'''
lowerCAmelCase__ : Any = '''t5-small'''
# Different types of model
lowerCAmelCase__ : List[str] = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
# Sequence classification model
lowerCAmelCase__ : str = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
# CausalLM model
lowerCAmelCase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
# Seq2seq model
lowerCAmelCase__ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
def __magic_name__( self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowerCAmelCase ( _lowercase ):
def __magic_name__( self ):
super().setUp()
def __magic_name__( self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self ):
lowerCAmelCase__ : Dict = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCAmelCase__ : Tuple = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _lowerCAmelCase ( _lowercase ):
def __magic_name__( self ):
super().setUp()
def __magic_name__( self ):
lowerCAmelCase__ : int = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=__UpperCAmelCase , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCAmelCase__ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
lowerCAmelCase__ : Optional[Any] = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class _lowerCAmelCase ( _lowercase ):
def __magic_name__( self ):
lowerCAmelCase__ : str = '''facebook/opt-350m'''
super().setUp()
def __magic_name__( self ):
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCAmelCase__ : Optional[int] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCAmelCase__ : Union[str, Any] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCAmelCase__ : Tuple = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(__UpperCAmelCase ) ):
lowerCAmelCase__ : List[str] = LoRALayer(module.q_proj , rank=16 )
lowerCAmelCase__ : int = LoRALayer(module.k_proj , rank=16 )
lowerCAmelCase__ : Tuple = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
lowerCAmelCase__ : Tuple = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCAmelCase__ : str = model.forward(**__UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(__UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _lowerCAmelCase ( _lowercase ):
A__ = 'gpt2-xl'
A__ = 3.3191854854152187
| 678 |
lowerCAmelCase_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
    # Breadth-first search on the residual graph: return True if the sink t is
    # reachable from the source s, recording each node's predecessor on the way.
lowerCAmelCase__ : Optional[int] = [False] * len(UpperCamelCase )
lowerCAmelCase__ : Tuple = [s]
lowerCAmelCase__ : Dict = True
while queue:
lowerCAmelCase__ : int = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : Optional[int] = u
return visited[t]
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
lowerCAmelCase__ : Any = [-1] * (len(UpperCamelCase ))
lowerCAmelCase__ : List[Any] = 0
lowerCAmelCase__ : Tuple = []
    lowerCAmelCase__ : Optional[int] = [i[:] for i in graph] # Keep a copy of the original capacities so the cut edges can be recovered later.
while bfs(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : List[Any] = float('''Inf''' )
lowerCAmelCase__ : Dict = sink
while s != source:
# Find the minimum value in select path
lowerCAmelCase__ : Tuple = min(UpperCamelCase , graph[parent[s]][s] )
lowerCAmelCase__ : List[Any] = parent[s]
max_flow += path_flow
lowerCAmelCase__ : List[Any] = sink
while v != source:
lowerCAmelCase__ : Dict = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowerCAmelCase__ : Optional[Any] = parent[v]
for i in range(len(UpperCamelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
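# Note (my addition): the loop above also accumulates the maximum flow in
# `max_flow` (23 for this classic example network) but never returns it; a
# variant returning `(max_flow, res)` would make the max-flow/min-cut
# correspondence explicit.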
| 678 | 1 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCAmelCase_ = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
lowerCAmelCase_ = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
lowerCAmelCase_ = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of references, one for each prediction. Each
        reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def __magic_name__( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="auto" , __UpperCAmelCase=-1 , __UpperCAmelCase=0.9 , __UpperCAmelCase=5 , __UpperCAmelCase=500 , __UpperCAmelCase="gpt2-large" , __UpperCAmelCase=-1 , __UpperCAmelCase=1024 , __UpperCAmelCase=25 , __UpperCAmelCase=5 , __UpperCAmelCase=True , __UpperCAmelCase=25 , ):
lowerCAmelCase__ : Tuple = compute_mauve(
p_text=__UpperCAmelCase , q_text=__UpperCAmelCase , p_features=__UpperCAmelCase , q_features=__UpperCAmelCase , p_tokens=__UpperCAmelCase , q_tokens=__UpperCAmelCase , num_buckets=__UpperCAmelCase , pca_max_data=__UpperCAmelCase , kmeans_explained_var=__UpperCAmelCase , kmeans_num_redo=__UpperCAmelCase , kmeans_max_iter=__UpperCAmelCase , featurize_model_name=__UpperCAmelCase , device_id=__UpperCAmelCase , max_text_length=__UpperCAmelCase , divergence_curve_discretization_size=__UpperCAmelCase , mauve_scaling_factor=__UpperCAmelCase , verbose=__UpperCAmelCase , seed=__UpperCAmelCase , )
return out
| 678 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _lowerCAmelCase ( unittest.TestCase ):
A__ = MODEL_FOR_CAUSAL_LM_MAPPING
A__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
__UpperCAmelCase , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
] , )
lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id
lowerCAmelCase__ : List[Any] = '''<pad>'''
lowerCAmelCase__ : List[Any] = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , )
self.assertEqual(
__UpperCAmelCase , [
[
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
],
[
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
],
] , )
@require_tf
def __magic_name__( self ):
lowerCAmelCase__ : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Dict = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
return text_generator, ["This is a test", "Another test"]
def __magic_name__( self ):
lowerCAmelCase__ : Any = '''Hello I believe in'''
lowerCAmelCase__ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ : Optional[int] = text_generator(__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , stop_sequence=''' fe''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = text_generator.model
lowerCAmelCase__ : Optional[int] = text_generator.tokenizer
lowerCAmelCase__ : Tuple = text_generator('''This is a test''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase )
lowerCAmelCase__ : Dict = text_generator('''This is a test''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCAmelCase__ : List[str] = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
] , )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Any = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : str = text_generator('''test''' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
        # An empty prompt is slightly special: it requires a BOS token to exist.
        # Pegasus is a special case, since it always appends EOS and therefore
        # works even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCAmelCase__ : str = text_generator('''''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCAmelCase__ : List[str] = text_generator('''''' )
if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without fancy
            # calculation, so these tests are skipped for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCAmelCase__ : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__UpperCAmelCase ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __magic_name__( self ):
import torch
# Classic `model_kwargs`
lowerCAmelCase__ : List[str] = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ : Any = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else).
lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ : Union[str, Any] = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCAmelCase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowerCAmelCase__ : Any = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def __magic_name__( self ):
import torch
lowerCAmelCase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def __magic_name__( self ):
import torch
lowerCAmelCase__ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=__UpperCAmelCase , top_p=0.5 )
def __magic_name__( self ):
lowerCAmelCase__ : int = '''Hello world'''
lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
lowerCAmelCase__ : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' )
else:
lowerCAmelCase__ : Dict = logging.get_logger('''transformers.generation.utils''' )
        lowerCAmelCase__ : Optional[Any] = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(__UpperCAmelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : Any = text_generator(__UpperCAmelCase , max_new_tokens=1 )
self.assertNotIn(__UpperCAmelCase , cl.out )
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : Union[str, Any] = text_generator(__UpperCAmelCase , max_length=10 )
self.assertNotIn(__UpperCAmelCase , cl.out )
| 678 | 1 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
def __lowerCAmelCase ( UpperCamelCase ) -> List[str]:
lowerCAmelCase__ : int = R'''\w+[.]\d+'''
lowerCAmelCase__ : Tuple = re.findall(UpperCamelCase , UpperCamelCase )
for pat in pats:
lowerCAmelCase__ : List[str] = key.replace(UpperCamelCase , '''_'''.join(pat.split('''.''' ) ) )
return key
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
lowerCAmelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowerCAmelCase__ : List[str] = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowerCAmelCase__ : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
lowerCAmelCase__ : str = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCAmelCase__ : Any = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=42 ) -> Any:
# Step 1: Convert pytorch tensor to numpy
lowerCAmelCase__ : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowerCAmelCase__ : Tuple = flax_model.init_weights(PRNGKey(UpperCamelCase ) )
lowerCAmelCase__ : Any = flatten_dict(UpperCamelCase )
lowerCAmelCase__ : List[Any] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase__ : str = rename_key(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = rename_key_and_reshape_tensor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
lowerCAmelCase__ : List[str] = jnp.asarray(UpperCamelCase )
return unflatten_dict(UpperCamelCase )
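# Self-contained sketch (my addition) of the regex-based renaming step above:
# PyTorch-style "block.N" key segments become Flax-style "block_N" segments.
def _rename_key_sketch(key: str) -> str:
    for pat in re.findall(r"\w+[.]\d+", key):
        key = key.replace(pat, "_".join(pat.split(".")))
    return key

assert _rename_key_sketch("encoder.layers.0.weight") == "encoder.layers_0.weight"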
| 678 |
def __lowerCAmelCase ( UpperCamelCase ) -> str:
return "".join([hex(UpperCamelCase )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase )] )
def __lowerCAmelCase ( UpperCamelCase ) -> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(UpperCamelCase ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCamelCase ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
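# Round-trip sketch (my addition): the helpers above are intended to agree
# with the standard library's base64.b16encode / b16decode on uppercase hex.
import base64

data = b"Hello World!"
encoded = "".join(hex(byte)[2:].zfill(2).upper() for byte in data)
assert encoded == base64.b16encode(data).decode("ascii")
assert base64.b16decode(encoded) == data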
| 678 | 1 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any:
lowerCAmelCase__ : Optional[Any] = 0
if start < end:
lowerCAmelCase__ : Union[str, Any] = randint(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : List[str] = a[end]
lowerCAmelCase__ : List[Any] = a[pivot]
lowerCAmelCase__ : str = temp
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = _in_place_partition(UpperCamelCase , UpperCamelCase , UpperCamelCase )
count += _in_place_quick_sort(UpperCamelCase , UpperCamelCase , p - 1 )
count += _in_place_quick_sort(UpperCamelCase , p + 1 , UpperCamelCase )
return count
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
lowerCAmelCase__ : List[str] = 0
lowerCAmelCase__ : List[Any] = randint(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : str = a[end]
lowerCAmelCase__ : Optional[int] = a[pivot]
lowerCAmelCase__ : List[str] = temp
lowerCAmelCase__ : str = start - 1
for index in range(UpperCamelCase , UpperCamelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowerCAmelCase__ : List[str] = new_pivot_index + 1
lowerCAmelCase__ : int = a[new_pivot_index]
lowerCAmelCase__ : int = a[index]
lowerCAmelCase__ : Tuple = temp
lowerCAmelCase__ : Optional[Any] = a[new_pivot_index + 1]
lowerCAmelCase__ : List[str] = a[end]
lowerCAmelCase__ : Union[str, Any] = temp
return new_pivot_index + 1, count
lowerCAmelCase_ = TemporaryFile()
lowerCAmelCase_ = 1_00 # 100 elements are to be sorted
lowerCAmelCase_ , lowerCAmelCase_ = 0, 1 # mean and standard deviation
lowerCAmelCase_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
lowerCAmelCase_ = np.load(outfile)
lowerCAmelCase_ = len(M) - 1
lowerCAmelCase_ = _in_place_quick_sort(M, 0, r)
print(
    """No of comparisons for 100 elements selected from a standard normal """
    """distribution is:"""
)
print(z)
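# Rough analytical cross-check (my addition, from the standard randomized
# quicksort analysis): the expected number of comparisons is about
# 2 * n * ln(n) ≈ 2 * 100 * ln(100) ≈ 921 for the 100 elements above, so the
# printed count should land in that ballpark on most runs.
import math

print(f"expected ~{2 * 100 * math.log(100):.0f} comparisons")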
| 678 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _lowerCAmelCase ( _lowercase ):
A__ = (DPMSolverSDEScheduler,)
A__ = 10
def __magic_name__( self , **__UpperCAmelCase ):
lowerCAmelCase__ : Dict = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**__UpperCAmelCase )
return config
def __magic_name__( self ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase )
def __magic_name__( self ):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase )
def __magic_name__( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__UpperCAmelCase )
def __magic_name__( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = self.scheduler_classes[0]
lowerCAmelCase__ : str = self.get_scheduler_config()
lowerCAmelCase__ : Optional[Any] = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Union[str, Any] = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Optional[Any] = sample.to(__UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : int = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[str] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[str] = output.prev_sample
lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Optional[Any] = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Tuple = sample.to(__UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : int = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : int = output.prev_sample
lowerCAmelCase__ : Any = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase )
lowerCAmelCase__ : Dict = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = output.prev_sample
lowerCAmelCase__ : Dict = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(__UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
lowerCAmelCase__ : Dict = self.get_scheduler_config()
lowerCAmelCase__ : Optional[int] = scheduler_class(**__UpperCAmelCase , use_karras_sigmas=__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma
lowerCAmelCase__ : Union[str, Any] = sample.to(__UpperCAmelCase )
for t in scheduler.timesteps:
lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Dict = output.prev_sample
lowerCAmelCase__ : int = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
| 678 | 1 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCAmelCase ( unittest.TestCase ):
@property
def __magic_name__( self ):
torch.manual_seed(0 )
lowerCAmelCase__ : List[str] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = self.dummy_uncond_unet
lowerCAmelCase__ : Any = PNDMScheduler()
lowerCAmelCase__ : int = PNDMPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
pndm.to(__UpperCAmelCase )
pndm.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ : str = torch.manual_seed(0 )
lowerCAmelCase__ : Tuple = pndm(generator=__UpperCAmelCase , num_inference_steps=20 , output_type='''numpy''' ).images
lowerCAmelCase__ : int = torch.manual_seed(0 )
lowerCAmelCase__ : Any = pndm(generator=__UpperCAmelCase , num_inference_steps=20 , output_type='''numpy''' , return_dict=__UpperCAmelCase )[0]
lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ : List[Any] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = '''google/ddpm-cifar10-32'''
lowerCAmelCase__ : Union[str, Any] = UNetaDModel.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = PNDMScheduler()
lowerCAmelCase__ : str = PNDMPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
pndm.to(__UpperCAmelCase )
pndm.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ : str = torch.manual_seed(0 )
lowerCAmelCase__ : Tuple = pndm(generator=__UpperCAmelCase , output_type='''numpy''' ).images
lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ : Any = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 678 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self , __UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = 3
lowerCAmelCase__ : Tuple = 250
lowerCAmelCase__ : List[Any] = ids_tensor((batch_size, length) , __UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = torch.ones((batch_size, length) , device=__UpperCAmelCase , dtype=torch.float ) / length
return input_ids, scores
def __magic_name__( self ):
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 )
lowerCAmelCase__ : List[str] = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(9 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(10 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = MaxLengthCriteria(max_length=10 )
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self._get_tensors(5 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(9 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(10 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(5 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(9 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(10 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ : int = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def __magic_name__( self ):
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 )
lowerCAmelCase__ : Any = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ : int = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(__UpperCAmelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
lowerCAmelCase__ : List[str] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(__UpperCAmelCase ) , 1 )
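# Usage sketch (my addition): outside of these tests, the criteria are passed
# to `generate` via its `stopping_criteria` argument, e.g.
#
#   outputs = model.generate(
#       input_ids,
#       stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=20)]),
#   )
#
# where `model` and `input_ids` are assumed to be an already-loaded causal LM
# and its tokenized prompt.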
| 678 | 1 |
import argparse
import json
import subprocess
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> str:
lowerCAmelCase__ : str = []
lowerCAmelCase__ : Optional[Any] = (
F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
)
lowerCAmelCase__ : Tuple = subprocess.run(UpperCamelCase , shell=UpperCamelCase , stdout=subprocess.PIPE )
lowerCAmelCase__ : Optional[int] = output.stdout.decode('''utf-8''' )
lowerCAmelCase__ : Optional[int] = json.loads(UpperCamelCase )
lowerCAmelCase__ : List[Any] = status['''runners''']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(UpperCamelCase )
    # save the result so we can report it on Slack
with open('''offline_runners.txt''' , '''w''' ) as fp:
fp.write(json.dumps(UpperCamelCase ) )
if len(UpperCamelCase ) > 0:
lowerCAmelCase__ : str = '''\n'''.join([x['''name'''] for x in offline_runners] )
raise ValueError(F"""The following runners are offline:\n{failed}""" )
if __name__ == "__main__":
def __lowerCAmelCase ( UpperCamelCase ) -> int:
return values.split(''',''' )
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
lowerCAmelCase_ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 678 |
from functools import reduce
lowerCAmelCase_ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def __lowerCAmelCase ( UpperCamelCase = N ) -> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda UpperCamelCase , UpperCamelCase : str(int(UpperCamelCase ) * int(UpperCamelCase ) ) , n[i : i + 13] ) )
for i in range(len(UpperCamelCase ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
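# An equivalent explicit-loop version (my addition; same result as solution()
# above, assuming N is the 1000-digit string defined earlier), shown without
# functools.reduce for readability:
def solution_explicit(digits: str = N) -> int:
    best = 0
    for i in range(len(digits) - 12):
        product = 1
        for ch in digits[i : i + 13]:
            product *= int(ch)
        best = max(best, product)
    return best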
| 678 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase = 16 , UpperCamelCase = "bert-base-cased" ) -> Dict:
lowerCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase )
lowerCAmelCase__ : Tuple = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ : Tuple = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCamelCase , max_length=UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase__ : Optional[Any] = datasets.map(
UpperCamelCase , batched=UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=UpperCamelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCamelCase , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(UpperCamelCase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
lowerCAmelCase__ : Any = DataLoader(
tokenized_datasets['''train'''] , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=UpperCamelCase )
lowerCAmelCase__ : Dict = DataLoader(
tokenized_datasets['''validation'''] , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=UpperCamelCase )
return train_dataloader, eval_dataloader
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]:
model.eval()
lowerCAmelCase__ : Union[str, Any] = 0
for step, batch in enumerate(UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase__ : Any = model(**UpperCamelCase )
lowerCAmelCase__ : Any = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once than multiple times
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCamelCase ) - 1:
lowerCAmelCase__ : str = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase__ : str = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCamelCase , references=UpperCamelCase , )
lowerCAmelCase__ : Optional[Any] = metric.compute()
return eval_metric["accuracy"]
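
# Standalone sketch (an assumption, not from the source) of why the last gathered
# batch is truncated above: distributed gathering pads the final batch, so it can
# contain duplicated samples that must be dropped before scoring.
import numpy as np

dataset_len = 10           # hypothetical eval set size
samples_seen = 8           # samples scored before the last batch
last_batch = np.arange(4)  # 2 real samples + 2 duplicated padding samples
trimmed = last_batch[: dataset_len - samples_seen]
assert len(trimmed) == 2   # only the real samples remain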
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Any:
# Initialize accelerator
lowerCAmelCase__ : Optional[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ : str = config['''lr''']
lowerCAmelCase__ : Tuple = int(config['''num_epochs'''] )
lowerCAmelCase__ : Optional[int] = int(config['''seed'''] )
lowerCAmelCase__ : Optional[int] = int(config['''batch_size'''] )
lowerCAmelCase__ : str = args.model_name_or_path
set_seed(UpperCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Dict = get_dataloaders(UpperCamelCase , UpperCamelCase , UpperCamelCase )
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
lowerCAmelCase__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase , return_dict=UpperCamelCase )
# Instantiate optimizer
lowerCAmelCase__ : Tuple = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase__ : int = optimizer_cls(params=model.parameters() , lr=UpperCamelCase )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase__ : str = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
lowerCAmelCase__ : Dict = 1
lowerCAmelCase__ : List[str] = (len(UpperCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase__ : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase , num_warmup_steps=0 , num_training_steps=UpperCamelCase , )
else:
lowerCAmelCase__ : int = DummyScheduler(UpperCamelCase , total_num_steps=UpperCamelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = accelerator.prepare(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase__ : Tuple = 0
    # We also need to keep track of the starting epoch so files are named properly
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : Tuple = evaluate.load('''glue''' , '''mrpc''' )
lowerCAmelCase__ : List[str] = num_epochs
if args.partial_train_epoch is not None:
lowerCAmelCase__ : Any = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCAmelCase__ : Optional[Any] = args.resume_from_checkpoint.split('''epoch_''' )[1]
lowerCAmelCase__ : int = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCAmelCase__ : Optional[Any] = int(UpperCamelCase ) + 1
lowerCAmelCase__ : Dict = evaluation_loop(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
accelerator.print('''resumed checkpoint performance:''' , UpperCamelCase )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizer\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , F"""state_{starting_epoch-1}.json""" ) , '''r''' ) as f:
lowerCAmelCase__ : Union[str, Any] = json.load(UpperCamelCase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCAmelCase__ : List[Any] = {}
for epoch in range(UpperCamelCase , UpperCamelCase ):
model.train()
for step, batch in enumerate(UpperCamelCase ):
lowerCAmelCase__ : Optional[int] = model(**UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = outputs.loss
lowerCAmelCase__ : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCAmelCase__ : str = F"""epoch_{epoch}"""
lowerCAmelCase__ : Dict = os.path.join(args.output_dir , UpperCamelCase )
accelerator.save_state(UpperCamelCase )
lowerCAmelCase__ : List[Any] = evaluation_loop(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = accuracy
lowerCAmelCase__ : List[Any] = lr_scheduler.get_lr()[0]
lowerCAmelCase__ : List[Any] = optimizer.param_groups[0]['''lr''']
lowerCAmelCase__ : Union[str, Any] = epoch
lowerCAmelCase__ : Union[str, Any] = overall_step
accelerator.print(F"""epoch {epoch}:""" , UpperCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F"""state_{epoch}.json""" ) , '''w''' ) as f:
json.dump(UpperCamelCase , UpperCamelCase )
def __lowerCAmelCase ( ) -> int:
    lowerCAmelCase__ : List[str] = argparse.ArgumentParser(description='''Simple example of a training script with checkpointing and resuming.''' )
parser.add_argument(
'''--model_name_or_path''' , type=UpperCamelCase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=UpperCamelCase , )
parser.add_argument(
'''--output_dir''' , type=UpperCamelCase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=UpperCamelCase , default=UpperCamelCase , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=UpperCamelCase , default=UpperCamelCase , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=UpperCamelCase , default=2 , help='''Number of train epochs.''' , )
lowerCAmelCase__ : Optional[Any] = parser.parse_args()
lowerCAmelCase__ : Optional[Any] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
main()
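# Hedged usage sketch (not part of the source; file names are placeholders):
# this kind of script is typically launched through the Accelerate CLI, e.g.
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 2 --output_dir ./ckpts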
| 678 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
def __lowerCAmelCase ( UpperCamelCase ) -> List[str]:
lowerCAmelCase__ : int = R'''\w+[.]\d+'''
lowerCAmelCase__ : Tuple = re.findall(UpperCamelCase , UpperCamelCase )
for pat in pats:
lowerCAmelCase__ : List[str] = key.replace(UpperCamelCase , '''_'''.join(pat.split('''.''' ) ) )
return key
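
# Hypothetical demo (not from the source) of the `\w+[.]\d+` renaming rule above:
# every "<name>.<digit>" segment becomes "<name>_<digit>".
import re

demo_key = "down_blocks.0.attentions.1"
for pat in re.findall(r"\w+[.]\d+", demo_key):
    demo_key = demo_key.replace(pat, "_".join(pat.split(".")))
print(demo_key)  # down_blocks_0_attentions_1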
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
lowerCAmelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowerCAmelCase__ : List[str] = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowerCAmelCase__ : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
lowerCAmelCase__ : str = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCAmelCase__ : Any = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
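
# Minimal sketch (not from the source) of the conv-kernel layout change above:
# PyTorch stores conv weights as (out_ch, in_ch, kH, kW), while Flax expects
# (kH, kW, in_ch, out_ch), hence the transpose(2, 3, 1, 0).
import numpy as np

pt_kernel = np.zeros((8, 3, 5, 5))             # hypothetical conv weight
flax_kernel = pt_kernel.transpose(2, 3, 1, 0)
assert flax_kernel.shape == (5, 5, 3, 8)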
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=42 ) -> Any:
# Step 1: Convert pytorch tensor to numpy
lowerCAmelCase__ : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowerCAmelCase__ : Tuple = flax_model.init_weights(PRNGKey(UpperCamelCase ) )
lowerCAmelCase__ : Any = flatten_dict(UpperCamelCase )
lowerCAmelCase__ : List[Any] = {}
    # Need to change some parameter names to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase__ : str = rename_key(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = rename_key_and_reshape_tensor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
        # also add the unexpected weight so that a warning is thrown
lowerCAmelCase__ : List[str] = jnp.asarray(UpperCamelCase )
return unflatten_dict(UpperCamelCase )
| 678 | 1 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def __lowerCAmelCase ( UpperCamelCase ) -> List[str]:
lowerCAmelCase__ : Union[str, Any] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
def __lowerCAmelCase ( UpperCamelCase ) -> Union[str, Any]:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = emb.weight.shape
lowerCAmelCase__ : List[str] = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase )
lowerCAmelCase__ : str = emb.weight.data
return lin_layer
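
# Hedged sketch (not from the source) of the helper above: an embedding's
# (vocab, dim) weight is reused as a bias-free output projection without
# transposition, since nn.Linear stores its weight as (out_features, in_features).
import torch
from torch import nn

demo_emb = nn.Embedding(10, 4)           # hypothetical vocab=10, dim=4
demo_lin = nn.Linear(4, 10, bias=False)
demo_lin.weight.data = demo_emb.weight.data
assert demo_lin(torch.randn(1, 4)).shape == (1, 10)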
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase="facebook/mbart-large-en-ro" , UpperCamelCase=False , UpperCamelCase=False ) -> Union[str, Any]:
lowerCAmelCase__ : Tuple = torch.load(UpperCamelCase , map_location='''cpu''' )['''model''']
remove_ignore_keys_(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowerCAmelCase__ : int = MBartConfig.from_pretrained(UpperCamelCase , vocab_size=UpperCamelCase )
if mbart_aa and finetuned:
lowerCAmelCase__ : Dict = '''relu'''
lowerCAmelCase__ : List[Any] = state_dict['''decoder.embed_tokens.weight''']
lowerCAmelCase__ : Optional[int] = MBartForConditionalGeneration(UpperCamelCase )
model.model.load_state_dict(UpperCamelCase )
if finetuned:
lowerCAmelCase__ : Union[str, Any] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path)
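# Hypothetical invocation (script name and paths are placeholders, not from the source):
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25 --finetuned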
| 678 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
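# Hedged note (an interpretation, not stated in the source): _LazyModule defers
# the heavy torch-dependent imports, so importing this package stays cheap and
# the Nezha classes are only materialized on first attribute access (or eagerly
# under TYPE_CHECKING for static analyzers).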
| 678 | 1 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
class _lowerCAmelCase ( _lowercase ):
A__ = 'AutoTokenizer'
A__ = ['tokenizer']
A__ = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None ):
super().__init__(__UpperCAmelCase )
lowerCAmelCase__ : Any = speaker_embeddings
@classmethod
def __magic_name__( cls , __UpperCAmelCase , __UpperCAmelCase="speaker_embeddings_path.json" , **__UpperCAmelCase ):
if speaker_embeddings_dict_path is not None:
lowerCAmelCase__ : Optional[int] = get_file_from_repo(
__UpperCAmelCase , __UpperCAmelCase , subfolder=kwargs.pop('''subfolder''' , __UpperCAmelCase ) , cache_dir=kwargs.pop('''cache_dir''' , __UpperCAmelCase ) , force_download=kwargs.pop('''force_download''' , __UpperCAmelCase ) , proxies=kwargs.pop('''proxies''' , __UpperCAmelCase ) , resume_download=kwargs.pop('''resume_download''' , __UpperCAmelCase ) , local_files_only=kwargs.pop('''local_files_only''' , __UpperCAmelCase ) , use_auth_token=kwargs.pop('''use_auth_token''' , __UpperCAmelCase ) , revision=kwargs.pop('''revision''' , __UpperCAmelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
f"""`{os.path.join(__UpperCAmelCase , __UpperCAmelCase )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
lowerCAmelCase__ : Tuple = None
else:
with open(__UpperCAmelCase ) as speaker_embeddings_json:
lowerCAmelCase__ : Any = json.load(__UpperCAmelCase )
else:
lowerCAmelCase__ : Optional[int] = None
lowerCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
return cls(tokenizer=__UpperCAmelCase , speaker_embeddings=__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase="speaker_embeddings_path.json" , __UpperCAmelCase="speaker_embeddings" , __UpperCAmelCase = False , **__UpperCAmelCase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__UpperCAmelCase , __UpperCAmelCase , '''v2''' ) , exist_ok=__UpperCAmelCase )
lowerCAmelCase__ : str = {}
lowerCAmelCase__ : Dict = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowerCAmelCase__ : int = self._load_voice_preset(__UpperCAmelCase )
lowerCAmelCase__ : Tuple = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , __UpperCAmelCase , f"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=__UpperCAmelCase , )
lowerCAmelCase__ : Optional[int] = os.path.join(__UpperCAmelCase , f"""{prompt_key}_{key}.npy""" )
lowerCAmelCase__ : Dict = tmp_dict
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , '''w''' ) as fp:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
super().save_pretrained(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase = None , **__UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = self.speaker_embeddings[voice_preset]
lowerCAmelCase__ : int = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
lowerCAmelCase__ : Optional[int] = get_file_from_repo(
self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , __UpperCAmelCase ) , cache_dir=kwargs.pop('''cache_dir''' , __UpperCAmelCase ) , force_download=kwargs.pop('''force_download''' , __UpperCAmelCase ) , proxies=kwargs.pop('''proxies''' , __UpperCAmelCase ) , resume_download=kwargs.pop('''resume_download''' , __UpperCAmelCase ) , local_files_only=kwargs.pop('''local_files_only''' , __UpperCAmelCase ) , use_auth_token=kwargs.pop('''use_auth_token''' , __UpperCAmelCase ) , revision=kwargs.pop('''revision''' , __UpperCAmelCase ) , )
if path is None:
raise ValueError(
f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.""" )
lowerCAmelCase__ : List[str] = np.load(__UpperCAmelCase )
return voice_preset_dict
def __magic_name__( self , __UpperCAmelCase = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="pt" , __UpperCAmelCase=256 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , **__UpperCAmelCase , ):
if voice_preset is not None and not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
if (
isinstance(__UpperCAmelCase , __UpperCAmelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowerCAmelCase__ : Optional[int] = self._load_voice_preset(__UpperCAmelCase )
else:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not voice_preset.endswith('''.npz''' ):
lowerCAmelCase__ : Dict = voice_preset + '''.npz'''
lowerCAmelCase__ : int = np.load(__UpperCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ : Tuple = BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
lowerCAmelCase__ : List[str] = self.tokenizer(
__UpperCAmelCase , return_tensors=__UpperCAmelCase , padding='''max_length''' , max_length=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
if voice_preset is not None:
lowerCAmelCase__ : List[Any] = voice_preset
return encoded_text
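# Hedged usage sketch (the checkpoint and preset names are assumptions):
#   processor = <the processor class above>.from_pretrained("suno/bark-small")
#   inputs = processor("Hello world", voice_preset="v2/en_speaker_6")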
| 678 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowerCAmelCase_ = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
@classmethod
def __magic_name__( cls ):
lowerCAmelCase__ : Dict = TOKEN
HfFolder.save_token(__UpperCAmelCase )
@classmethod
def __magic_name__( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCAmelCase__ : List[Any] = FlaxBertModel(__UpperCAmelCase )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
lowerCAmelCase__ : List[Any] = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
lowerCAmelCase__ : Union[str, Any] = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ : List[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ : str = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__UpperCAmelCase , repo_id='''test-model-flax''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCAmelCase__ : Optional[Any] = FlaxBertModel(__UpperCAmelCase )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
lowerCAmelCase__ : str = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ : Dict = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__UpperCAmelCase , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Any = flatten_dict(modela.params )
    lowerCAmelCase__ : List[str] = flatten_dict(modelb.params )
for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1E-4:
lowerCAmelCase__ : Optional[Any] = False
return models_are_equal
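
# Tiny sketch (an assumption, not from the source) of the equality rule above:
# two parameter trees match when every leaf differs by less than 1e-4 in
# summed absolute difference.
import numpy as np

leaf_a = np.ones(3)
leaf_b = np.ones(3) + 1e-6
assert np.sum(np.abs(leaf_a - leaf_b)) < 1e-4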
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCAmelCase__ : List[str] = FlaxBertModel(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCAmelCase__ : Union[str, Any] = FlaxBertModel(__UpperCAmelCase )
lowerCAmelCase__ : Dict = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , max_shard_size='''10KB''' )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Any = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = '''bert'''
lowerCAmelCase__ : int = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Dict = FlaxBertModel.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = '''bert'''
lowerCAmelCase__ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
| 678 | 1 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(
_lowercase , R'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class _lowerCAmelCase ( _lowercase ):
def __magic_name__( self , __UpperCAmelCase ):
if self.framework == "tf":
lowerCAmelCase__ : str = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowerCAmelCase__ : List[str] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__UpperCAmelCase )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __magic_name__( self , __UpperCAmelCase ):
lowerCAmelCase__ : Any = self.get_masked_index(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def __magic_name__( self , __UpperCAmelCase ):
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ):
if return_tensors is None:
lowerCAmelCase__ : Union[str, Any] = self.framework
lowerCAmelCase__ : str = self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase )
self.ensure_exactly_one_mask_token(__UpperCAmelCase )
return model_inputs
def __magic_name__( self , __UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = self.model(**__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = model_inputs['''input_ids''']
return model_outputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase=5 , __UpperCAmelCase=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
lowerCAmelCase__ : Union[str, Any] = target_ids.shape[0]
lowerCAmelCase__ : str = model_outputs['''input_ids'''][0]
lowerCAmelCase__ : str = model_outputs['''logits''']
if self.framework == "tf":
lowerCAmelCase__ : Any = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowerCAmelCase__ : Optional[Any] = outputs.numpy()
lowerCAmelCase__ : Tuple = outputs[0, masked_index, :]
lowerCAmelCase__ : Union[str, Any] = stable_softmax(__UpperCAmelCase , axis=-1 )
if target_ids is not None:
lowerCAmelCase__ : Any = tf.gather_nd(tf.squeeze(__UpperCAmelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
lowerCAmelCase__ : List[str] = tf.expand_dims(__UpperCAmelCase , 0 )
lowerCAmelCase__ : int = tf.math.top_k(__UpperCAmelCase , k=__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Dict = topk.values.numpy(), topk.indices.numpy()
else:
lowerCAmelCase__ : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__UpperCAmelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowerCAmelCase__ : Dict = outputs[0, masked_index, :]
lowerCAmelCase__ : Any = logits.softmax(dim=-1 )
if target_ids is not None:
lowerCAmelCase__ : Optional[int] = probs[..., target_ids]
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = probs.topk(__UpperCAmelCase )
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : Union[str, Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
lowerCAmelCase__ : Dict = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
lowerCAmelCase__ : Optional[int] = input_ids.numpy().copy()
if target_ids is not None:
lowerCAmelCase__ : str = target_ids[p].tolist()
lowerCAmelCase__ : Any = p
# Filter padding out:
lowerCAmelCase__ : Optional[int] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowerCAmelCase__ : Any = self.tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ : List[str] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(__UpperCAmelCase )
result.append(__UpperCAmelCase )
if single_mask:
return result[0]
return result
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase=None ):
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = [targets]
try:
lowerCAmelCase__ : Optional[int] = self.tokenizer.get_vocab()
except Exception:
lowerCAmelCase__ : Dict = {}
lowerCAmelCase__ : Any = []
for target in targets:
lowerCAmelCase__ : Union[str, Any] = vocab.get(__UpperCAmelCase , __UpperCAmelCase )
if id_ is None:
lowerCAmelCase__ : Optional[Any] = self.tokenizer(
__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , max_length=1 , truncation=__UpperCAmelCase , )['''input_ids''']
if len(__UpperCAmelCase ) == 0:
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
lowerCAmelCase__ : Tuple = input_ids[0]
                # XXX: if users hit this path it becomes pretty slow, so the
                # warning should help them fix the input to get faster
                # performance.
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
lowerCAmelCase__ : Any = list(set(__UpperCAmelCase ) )
if len(__UpperCAmelCase ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
lowerCAmelCase__ : str = np.array(__UpperCAmelCase )
return target_ids
def __magic_name__( self , __UpperCAmelCase=None , __UpperCAmelCase=None ):
lowerCAmelCase__ : List[Any] = {}
if targets is not None:
lowerCAmelCase__ : List[Any] = self.get_target_ids(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = target_ids
if top_k is not None:
lowerCAmelCase__ : Union[str, Any] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = super().__call__(__UpperCAmelCase , **__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1:
return outputs[0]
return outputs
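# Hedged usage sketch (model name is an assumption, not from the source):
#   from transformers import pipeline
#   fill = pipeline("fill-mask", model="bert-base-uncased")
#   fill("Paris is the [MASK] of France.", top_k=3, targets=["capital"])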
| 678 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any:
lowerCAmelCase__ : Optional[Any] = 0
if start < end:
lowerCAmelCase__ : Union[str, Any] = randint(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : List[str] = a[end]
lowerCAmelCase__ : List[Any] = a[pivot]
lowerCAmelCase__ : str = temp
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = _in_place_partition(UpperCamelCase , UpperCamelCase , UpperCamelCase )
count += _in_place_quick_sort(UpperCamelCase , UpperCamelCase , p - 1 )
count += _in_place_quick_sort(UpperCamelCase , p + 1 , UpperCamelCase )
return count
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
lowerCAmelCase__ : List[str] = 0
lowerCAmelCase__ : List[Any] = randint(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : str = a[end]
lowerCAmelCase__ : Optional[int] = a[pivot]
lowerCAmelCase__ : List[str] = temp
lowerCAmelCase__ : str = start - 1
for index in range(UpperCamelCase , UpperCamelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowerCAmelCase__ : List[str] = new_pivot_index + 1
lowerCAmelCase__ : int = a[new_pivot_index]
lowerCAmelCase__ : int = a[index]
lowerCAmelCase__ : Tuple = temp
lowerCAmelCase__ : Optional[Any] = a[new_pivot_index + 1]
lowerCAmelCase__ : List[str] = a[end]
lowerCAmelCase__ : Union[str, Any] = temp
return new_pivot_index + 1, count
lowerCAmelCase_ = TemporaryFile()
lowerCAmelCase_ = 1_00 # 100 elements are to be sorted
lowerCAmelCase_ , lowerCAmelCase_ = 0, 1 # mean and standard deviation
lowerCAmelCase_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
lowerCAmelCase_ = np.load(outfile)
lowerCAmelCase_ = len(M) - 1
lowerCAmelCase_ = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
| 678 | 1 |
import heapq as hq
import math
from collections.abc import Iterator
class _lowerCAmelCase :
def __init__( self , __UpperCAmelCase ):
lowerCAmelCase__ : List[str] = str(id_ )
lowerCAmelCase__ : List[Any] = None
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : Optional[int] = []
lowerCAmelCase__ : List[Any] = {} # {vertex:distance}
def __lt__( self , __UpperCAmelCase ):
return self.key < other.key
def __repr__( self ):
return self.id
def __magic_name__( self , __UpperCAmelCase ):
self.neighbors.append(__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Any = weight
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , UpperCamelCase )
graph[b - 1].add_edge(graph[a - 1] , UpperCamelCase )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> list:
lowerCAmelCase__ : Optional[int] = []
for u in graph:
lowerCAmelCase__ : List[str] = math.inf
lowerCAmelCase__ : Optional[int] = None
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : Optional[int] = graph[:]
while q:
lowerCAmelCase__ : List[Any] = min(UpperCamelCase )
q.remove(UpperCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
lowerCAmelCase__ : int = u
lowerCAmelCase__ : List[str] = u.edges[v.id]
for i in range(1 , len(UpperCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Iterator[tuple]:
for u in graph:
lowerCAmelCase__ : Dict = math.inf
lowerCAmelCase__ : str = None
lowerCAmelCase__ : List[str] = 0
lowerCAmelCase__ : Tuple = list(UpperCamelCase )
hq.heapify(UpperCamelCase )
while h:
lowerCAmelCase__ : str = hq.heappop(UpperCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
lowerCAmelCase__ : Optional[int] = u
lowerCAmelCase__ : Union[str, Any] = u.edges[v.id]
hq.heapify(UpperCamelCase )
for i in range(1 , len(UpperCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def __lowerCAmelCase ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
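# Hedged worked example (an assumption, not from the source): for the weighted
# triangle with edges (1,2)=1, (2,3)=2 and (1,3)=3, Prim's algorithm rooted at
# vertex 1 picks edges (1,2) and (2,3), for a minimum spanning tree of weight 3.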
| 678 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]:
assert isinstance(UpperCamelCase , UpperCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCAmelCase__ : List[str] = tmp_path / '''cache'''
lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase__ : List[Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCAmelCase__ : str = tmp_path / '''cache'''
lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : str = features.copy() if features else default_expected_features
lowerCAmelCase__ : List[Any] = (
Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
lowerCAmelCase__ : str = tmp_path / '''cache'''
lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , split=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
    if issubclass(UpperCamelCase , str ):
        lowerCAmelCase__ : Any = parquet_path
    elif issubclass(UpperCamelCase , list ):
        lowerCAmelCase__ : Any = [parquet_path]
lowerCAmelCase__ : int = tmp_path / '''cache'''
lowerCAmelCase__ : str = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=("train",) ) -> str:
assert isinstance(UpperCamelCase , UpperCamelCase )
for split in splits:
lowerCAmelCase__ : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]:
lowerCAmelCase__ : Any = tmp_path / '''cache'''
lowerCAmelCase__ : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase__ : Optional[Any] = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read()
_check_parquet_datasetdict(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
lowerCAmelCase__ : Any = tmp_path / '''cache'''
lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : Tuple = features.copy() if features else default_expected_features
lowerCAmelCase__ : Optional[int] = (
Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase__ : List[str] = ParquetDatasetReader({'''train''': parquet_path} , features=UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_datasetdict(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
if split:
lowerCAmelCase__ : Tuple = {split: parquet_path}
else:
lowerCAmelCase__ : int = '''train'''
lowerCAmelCase__ : List[Any] = {'''train''': parquet_path, '''test''': parquet_path}
lowerCAmelCase__ : Optional[int] = tmp_path / '''cache'''
lowerCAmelCase__ : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : List[str] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_datasetdict(UpperCamelCase , UpperCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCAmelCase__ : Optional[Any] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
lowerCAmelCase__ : Union[str, Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' )
lowerCAmelCase__ : int = pf.read()
assert dataset.data.table == output_table
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCAmelCase__ : List[str] = str(shared_datadir / '''test_image_rgb.jpg''' )
lowerCAmelCase__ : Dict = {'''image''': [image_path]}
lowerCAmelCase__ : int = Features({'''image''': Image()} )
lowerCAmelCase__ : Dict = Dataset.from_dict(UpperCamelCase , features=UpperCamelCase )
lowerCAmelCase__ : List[str] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
lowerCAmelCase__ : Dict = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
lowerCAmelCase__ : int = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=UpperCamelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Any:
assert get_writer_batch_size(UpperCamelCase ) == expected
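
# Hedged round-trip sketch (not from the source; column names are assumptions):
import io

import pyarrow as pa
import pyarrow.parquet as pq

buf = io.BytesIO()
demo_table = pa.table({"col_1": ["a", "b"], "col_2": [1, 2]})
pq.write_table(demo_table, buf)
buf.seek(0)
assert pq.read_table(buf).equals(demo_table)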
| 678 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self ):
lowerCAmelCase__ : str = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
lowerCAmelCase__ : List[str] = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
lowerCAmelCase__ : Dict = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
lowerCAmelCase__ : Tuple = tf_top_k_top_p_filtering(__UpperCAmelCase , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
lowerCAmelCase__ : Any = output[output != -float('''inf''' )]
lowerCAmelCase__ : Optional[int] = tf.cast(
tf.where(tf.not_equal(__UpperCAmelCase , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1e-12 )
tf.debugging.assert_equal(__UpperCAmelCase , __UpperCAmelCase )
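# Hedged recap (an interpretation, not stated in the source): the filtering keeps
# only tokens that are both among the `top_k` largest logits and inside the
# smallest nucleus whose cumulative probability reaches `top_p` (but never fewer
# than `min_tokens_to_keep`), masking everything else to -inf.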
@require_tf
class _lowerCAmelCase ( unittest.TestCase , _lowercase ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
A__ = {
'AutoModelForCausalLM': TFAutoModelForCausalLM,
'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq,
'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM,
'AutoModelForVision2Seq': TFAutoModelForVisionaSeq,
'LogitsProcessorList': TFLogitsProcessorList,
'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor,
'create_tensor_fn': tf.convert_to_tensor,
'floats_tensor': floats_tensor,
'return_tensors': 'tf',
}
@slow
def __magic_name__( self ):
# TF-only test: tf.saved_model export
lowerCAmelCase__ : List[str] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ : Dict = 2
lowerCAmelCase__ : List[str] = 2
class _lowerCAmelCase ( tf.Module ):
def __init__( self , __UpperCAmelCase ):
super(__UpperCAmelCase , self ).__init__()
lowerCAmelCase__ : Tuple = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=__UpperCAmelCase , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = self.model.generate(
input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , max_new_tokens=__UpperCAmelCase , return_dict_in_generate=__UpperCAmelCase , )
return {"sequences": outputs["sequences"]}
lowerCAmelCase__ : List[str] = [[2, 0], [102, 103]]
lowerCAmelCase__ : Any = [[1, 0], [1, 1]]
lowerCAmelCase__ : Optional[Any] = DummyModel(model=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__UpperCAmelCase , __UpperCAmelCase , signatures={'''serving_default''': dummy_model.serving} )
lowerCAmelCase__ : Optional[Any] = tf.saved_model.load(__UpperCAmelCase ).signatures['''serving_default''']
for batch_size in range(1 , len(__UpperCAmelCase ) + 1 ):
lowerCAmelCase__ : str = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
lowerCAmelCase__ : List[Any] = serving_func(**__UpperCAmelCase )['''sequences''']
lowerCAmelCase__ : Any = test_model.generate(**__UpperCAmelCase , max_new_tokens=__UpperCAmelCase )
tf.debugging.assert_equal(__UpperCAmelCase , __UpperCAmelCase )
@slow
def __magic_name__( self ):
# TF-only test: tf.saved_model export
lowerCAmelCase__ : int = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ : Tuple = 1
lowerCAmelCase__ : Union[str, Any] = 2
class _lowerCAmelCase ( tf.Module ):
def __init__( self , __UpperCAmelCase ):
super(__UpperCAmelCase , self ).__init__()
lowerCAmelCase__ : Any = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=__UpperCAmelCase , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Tuple = self.model.generate(
input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , max_new_tokens=__UpperCAmelCase , return_dict_in_generate=__UpperCAmelCase , )
return {"sequences": outputs["sequences"]}
lowerCAmelCase__ : Dict = [[2], [102, 103]]
lowerCAmelCase__ : List[str] = [[1], [1, 1]]
lowerCAmelCase__ : Optional[int] = DummyModel(model=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__UpperCAmelCase , __UpperCAmelCase , signatures={'''serving_default''': dummy_model.serving} )
lowerCAmelCase__ : Dict = tf.saved_model.load(__UpperCAmelCase ).signatures['''serving_default''']
for input_row in range(len(__UpperCAmelCase ) ):
lowerCAmelCase__ : str = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
lowerCAmelCase__ : List[str] = serving_func(**__UpperCAmelCase )['''sequences''']
lowerCAmelCase__ : int = test_model.generate(**__UpperCAmelCase , max_new_tokens=__UpperCAmelCase )
tf.debugging.assert_equal(__UpperCAmelCase , __UpperCAmelCase )
@slow
@require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # A fake model whose signature accepts "foo" as an extra argument.
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
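
# --- Added illustration (not part of the test suite above) ---
# A minimal, standalone distillation of the SavedModel-export pattern the tests
# above exercise: wrap `generate` in a tf.Module with a fixed input signature,
# save it, and call the restored serving signature. The checkpoint name and the
# max_new_tokens value are illustrative assumptions.
import tempfile

import tensorflow as tf
from transformers import TFAutoModelForCausalLM

model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

class GenerateModule(tf.Module):
    def __init__(self, model):
        super().__init__()
        self.model = model

    @tf.function(
        input_signature=(
            tf.TensorSpec((None, None), tf.int32, name="input_ids"),
            tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
        )
    )
    def serving(self, input_ids, attention_mask):
        outputs = self.model.generate(
            input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=2, return_dict_in_generate=True
        )
        return {"sequences": outputs["sequences"]}

with tempfile.TemporaryDirectory() as tmp_dir:
    module = GenerateModule(model)
    tf.saved_model.save(module, tmp_dir, signatures={"serving_default": module.serving})
    serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
    sequences = serving_func(
        input_ids=tf.constant([[2]], dtype=tf.int32),
        attention_mask=tf.constant([[1]], dtype=tf.int32),
    )["sequences"]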
| 678 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
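
# --- Added illustration ---
# A hedged usage sketch (relying on the upstream FocalNetConfig export): the
# backbone mixin aligns the requested out_features with their positions in
# stage_names.
from transformers import FocalNetConfig

config = FocalNetConfig(out_features=["stage1", "stage2"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage1', 'stage2']
print(config.out_indices)   # the matching positions in stage_names (1 and 2)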
| 678 | 1 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **dataset_kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **dataset_kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **dataset_kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
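
# --- Added illustration ---
# Hypothetical invocation (fire maps the command line onto save_len_file's
# signature; the tokenizer name and data directory are illustrative):
#
#   python save_len_file.py t5-small /path/to/data --consider_target=True
#
# The pickled *.len files can then be reloaded (e.g. via pickle) to drive
# length-based batch sampling at training time.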
| 678 |
from scipy.stats import pearsonr
import datasets
lowerCAmelCase_ = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
lowerCAmelCase_ = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
lowerCAmelCase_ = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 678 | 1 |
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask, all persons have been given a task; this is
        # one complete arrangement
        if mask == self.final_mask:
            return 1
        # if not everyone gets a task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if this case has already been computed, reuse the memoized value
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # number of ways when we don't include this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign this task to every possible person and recursively count
        # arrangements for the remaining tasks
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p has already been given a task, skip
                if mask & (1 << p):
                    continue
                # assign this task to p, set p's bit in the mask, and recurse
                # with the new mask value
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # fill the DP table; the final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
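
# --- Added illustration ---
# An independent brute-force cross-check of the bitmask DP (not part of the
# original module): enumerate every way to hand out distinct tasks so that each
# person gets exactly one task they can perform.
from itertools import permutations

def count_ways_brute_force(total, task_performed):
    return sum(
        all(task in can_do for task, can_do in zip(assignment, task_performed))
        for assignment in permutations(range(1, total + 1), len(task_performed))
    )

print(count_ways_brute_force(5, [[1, 3, 4], [1, 2, 5], [3, 4]]))  # matches the DP count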
| 678 |
from manim import *
class _lowerCAmelCase ( _lowercase ):
    def construct(self):
lowerCAmelCase__ : Tuple = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase__ : Dict = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : List[str] = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : int = Text('''CPU''' , font_size=24 )
lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = [mem.copy() for i in range(4 )]
lowerCAmelCase__ : Tuple = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Tuple = Text('''GPU''' , font_size=24 )
lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Tuple = Text('''Model''' , font_size=24 )
lowerCAmelCase__ : List[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : Optional[Any] = []
for i, rect in enumerate(__UpperCAmelCase ):
rect.set_stroke(__UpperCAmelCase )
lowerCAmelCase__ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
self.add(__UpperCAmelCase )
model_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase )
lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Any = Text('''Loaded Checkpoint''' , font_size=24 )
lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : str = []
for i, rect in enumerate(__UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
target.move_to(__UpperCAmelCase )
ckpt_arr.append(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase__ : List[Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[str] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : str = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
lowerCAmelCase__ : Optional[Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase__ : Dict = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : str = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : List[str] = Text('''Disk''' , font_size=24 )
lowerCAmelCase__ : Any = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
lowerCAmelCase__ : str = []
for i, rect in enumerate(__UpperCAmelCase ):
lowerCAmelCase__ : Dict = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
self.play(*__UpperCAmelCase )
self.play(FadeOut(__UpperCAmelCase ) )
lowerCAmelCase__ : int = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) )
self.play(
FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , )
self.wait()
| 678 | 1 |
from math import factorial
def solution(num: int = 100) -> int:
    return sum(int(x) for x in str(factorial(num)))
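

# Quick sanity check (added): 10! = 3628800, whose digits sum to
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert solution(10) == 27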
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 678 |
import collections
import os
import re
from pathlib import Path
lowerCAmelCase_ = """src/transformers"""
# Matches is_xxx_available()
lowerCAmelCase_ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase_ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase_ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCAmelCase_ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase_ = re.compile(R"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase_ = re.compile(R"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCAmelCase_ = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowerCAmelCase_ = re.compile(R"""^\s*else:""")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def __lowerCAmelCase ( UpperCamelCase ) -> Any:
with open(UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCAmelCase__ : Union[str, Any] = f.readlines()
lowerCAmelCase__ : Tuple = 0
while line_index < len(UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(UpperCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase__ : List[str] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase__ : str = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(UpperCamelCase ):
lowerCAmelCase__ : str = _re_one_line_import_struct.search(UpperCamelCase ).groups()[0]
lowerCAmelCase__ : Optional[Any] = re.findall(R'''\[([^\]]+)\]''' , UpperCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
lowerCAmelCase__ : Tuple = _re_import_struct_key_value.search(UpperCamelCase )
if single_line_import_search is not None:
lowerCAmelCase__ : Optional[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(UpperCamelCase ) > 0]
objects.extend(UpperCamelCase )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase__ : Any = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase__ : List[str] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ : Union[str, Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ : Optional[int] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
lowerCAmelCase__ : str = lines[line_index]
if _re_import_struct_add_one.search(UpperCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(UpperCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(UpperCamelCase ) is not None:
lowerCAmelCase__ : Optional[int] = _re_import_struct_add_many.search(UpperCamelCase ).groups()[0].split(''', ''' )
lowerCAmelCase__ : List[Any] = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0]
objects.extend(UpperCamelCase )
elif _re_between_brackets.search(UpperCamelCase ) is not None:
lowerCAmelCase__ : List[Any] = _re_between_brackets.search(UpperCamelCase ).groups()[0].split(''', ''' )
lowerCAmelCase__ : Optional[int] = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0]
objects.extend(UpperCamelCase )
elif _re_quote_object.search(UpperCamelCase ) is not None:
objects.append(_re_quote_object.search(UpperCamelCase ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
lowerCAmelCase__ : Optional[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase__ : Any = []
while (
line_index < len(UpperCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
lowerCAmelCase__ : Tuple = lines[line_index]
lowerCAmelCase__ : List[Any] = _re_import.search(UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase__ : Dict = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(UpperCamelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase__ : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ : Tuple = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
lowerCAmelCase__ : Any = lines[line_index]
lowerCAmelCase__ : Union[str, Any] = _re_import.search(UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCAmelCase__ : str = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
lowerCAmelCase__ : Dict = []
for root, _, files in os.walk(UpperCamelCase ):
if "__init__.py" in files:
lowerCAmelCase__ : Any = os.path.join(UpperCamelCase , '''__init__.py''' )
lowerCAmelCase__ : List[Any] = parse_init(UpperCamelCase )
if objects is not None:
lowerCAmelCase__ : Optional[int] = analyze_results(*UpperCamelCase )
if len(UpperCamelCase ) > 0:
lowerCAmelCase__ : Tuple = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(UpperCamelCase ) )
if len(UpperCamelCase ) > 0:
raise ValueError('''\n\n'''.join(UpperCamelCase ) )
def get_transformers_submodules():
lowerCAmelCase__ : str = []
for path, directories, files in os.walk(UpperCamelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(UpperCamelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(UpperCamelCase ) / folder).glob('''*.py''' ) ) ) == 0:
continue
lowerCAmelCase__ : Tuple = str((Path(UpperCamelCase ) / folder).relative_to(UpperCamelCase ) )
lowerCAmelCase__ : Dict = short_path.replace(os.path.sep , '''.''' )
submodules.append(UpperCamelCase )
for fname in files:
if fname == "__init__.py":
continue
lowerCAmelCase__ : Union[str, Any] = str((Path(UpperCamelCase ) / fname).relative_to(UpperCamelCase ) )
lowerCAmelCase__ : Tuple = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(UpperCamelCase )
return submodules
lowerCAmelCase_ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def check_submodules():
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
lowerCAmelCase__ : Dict = direct_transformers_import(UpperCamelCase )
lowerCAmelCase__ : int = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-)add them.
with open(os.path.join(UpperCamelCase , '''__init__.py''' ) , '''r''' ) as f:
lowerCAmelCase__ : str = f.read()
import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , UpperCamelCase ) ) )
lowerCAmelCase__ : Optional[int] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(UpperCamelCase ) > 0:
lowerCAmelCase__ : List[Any] = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
F"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 678 | 1 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
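
# --- Added illustration ---
# Hypothetical invocation (script name and paths are illustrative; the flags
# match the argparse definitions above):
#
#   python convert_lxmert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert/model.ckpt \
#       --config_file ./lxmert/config.json \
#       --pytorch_dump_path ./lxmert/pytorch_model.bin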
| 678 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=64 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ):
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : Tuple = batch_size
lowerCAmelCase__ : Union[str, Any] = seq_length
lowerCAmelCase__ : str = is_training
lowerCAmelCase__ : Union[str, Any] = use_input_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : int = use_labels
lowerCAmelCase__ : List[Any] = vocab_size
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : List[str] = embedding_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : Optional[int] = num_attention_heads
lowerCAmelCase__ : List[str] = intermediate_size
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = max_position_embeddings
lowerCAmelCase__ : List[Any] = type_vocab_size
lowerCAmelCase__ : Optional[Any] = type_sequence_label_size
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : Optional[Any] = num_labels
lowerCAmelCase__ : List[str] = num_choices
lowerCAmelCase__ : Any = scope
    def prepare_config_and_inputs(self):
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : str = None
if self.use_input_mask:
lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : Optional[Any] = None
if self.use_token_type_ids:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Optional[int] = None
if self.use_labels:
lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = MegatronBertModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = MegatronBertForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = MegatronBertForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = MegatronBertForNextSentencePrediction(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = MegatronBertForPreTraining(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : int = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , next_sentence_label=__UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = MegatronBertForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.num_labels
lowerCAmelCase__ : Union[str, Any] = MegatronBertForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.num_labels
lowerCAmelCase__ : str = MegatronBertForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.num_choices
lowerCAmelCase__ : Dict = MegatronBertForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : Any = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
A__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
A__ = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ = True
# test_resize_embeddings = False
A__ = False
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
lowerCAmelCase__ : List[Any] = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class in get_values(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def __magic_name__( self ):
lowerCAmelCase__ : str = MegatronBertModelTester(self )
lowerCAmelCase__ : Dict = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def __magic_name__( self ):
self.config_tester.run_common_tests()
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__UpperCAmelCase )
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )
lowerCAmelCase_ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
def __magic_name__( self ):
lowerCAmelCase__ : int = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
lowerCAmelCase__ : Union[str, Any] = os.path.join(os.environ['''MYDIR'''] , __UpperCAmelCase )
lowerCAmelCase__ : Tuple = MegatronBertModel.from_pretrained(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.half()
lowerCAmelCase__ : Optional[int] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase )[0]
lowerCAmelCase__ : List[Any] = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , __UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
lowerCAmelCase__ : Union[str, Any] = output[0, ii, jj]
lowerCAmelCase__ : Optional[Any] = expected[3 * ii + jj]
lowerCAmelCase__ : List[str] = '''ii={} jj={} a={} b={}'''.format(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertTrue(math.isclose(__UpperCAmelCase , __UpperCAmelCase , rel_tol=__UpperCAmelCase , abs_tol=__UpperCAmelCase ) , msg=__UpperCAmelCase )
| 678 | 1 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 678 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , __UpperCAmelCase=5_0265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
lowerCAmelCase__ : Union[str, Any] = vocab_size
lowerCAmelCase__ : Optional[Any] = max_position_embeddings
lowerCAmelCase__ : int = d_model
lowerCAmelCase__ : str = encoder_ffn_dim
lowerCAmelCase__ : Any = encoder_layers
lowerCAmelCase__ : Dict = encoder_attention_heads
lowerCAmelCase__ : Union[str, Any] = decoder_ffn_dim
lowerCAmelCase__ : Union[str, Any] = decoder_layers
lowerCAmelCase__ : Any = decoder_attention_heads
lowerCAmelCase__ : Tuple = dropout
lowerCAmelCase__ : Any = attention_dropout
lowerCAmelCase__ : Any = activation_dropout
lowerCAmelCase__ : Optional[Any] = activation_function
lowerCAmelCase__ : Union[str, Any] = init_std
lowerCAmelCase__ : Union[str, Any] = encoder_layerdrop
lowerCAmelCase__ : int = decoder_layerdrop
lowerCAmelCase__ : Optional[int] = classifier_dropout
lowerCAmelCase__ : str = use_cache
lowerCAmelCase__ : int = encoder_layers
lowerCAmelCase__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ):
lowerCAmelCase__ : str = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
'''The config can simply be saved and uploaded again to be fixed.''' )
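
# --- Added illustration ---
# Hedged sketch (using the upstream BartConfig, which the class above mirrors):
# attribute_map aliases num_attention_heads to encoder_attention_heads and
# hidden_size to d_model.
#
#   from transformers import BartConfig
#   config = BartConfig(encoder_attention_heads=16, d_model=1024)
#   assert config.num_attention_heads == 16
#   assert config.hidden_size == 1024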
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def __magic_name__( self ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCAmelCase__ : Any = {0: '''batch'''}
lowerCAmelCase__ : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowerCAmelCase__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''}
lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCAmelCase__ : List[str] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_layers
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCAmelCase__ : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
lowerCAmelCase__ : List[str] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __magic_name__( self ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Dict = super().outputs
else:
lowerCAmelCase__ : Any = super(__UpperCAmelCase , self ).outputs
if self.use_past:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.num_layers
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
lowerCAmelCase__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Generate decoder inputs
lowerCAmelCase__ : int = seq_length if not self.use_past else 1
lowerCAmelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
lowerCAmelCase__ : str = dict(**__UpperCAmelCase , **__UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = common_inputs['''input_ids'''].shape
lowerCAmelCase__ : List[str] = common_inputs['''decoder_input_ids'''].shape[1]
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.num_attention_heads
lowerCAmelCase__ : Any = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase__ : List[Any] = decoder_seq_length + 3
lowerCAmelCase__ : Any = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCAmelCase__ : Any = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 )
lowerCAmelCase__ : List[str] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.num_layers
lowerCAmelCase__ : Union[str, Any] = min(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Any = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers
lowerCAmelCase__ : Dict = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__UpperCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
) )
# TODO: test this.
lowerCAmelCase__ : Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__UpperCAmelCase , __UpperCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) )
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
lowerCAmelCase__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCAmelCase__ , lowerCAmelCase__ : Any = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowerCAmelCase__ : List[str] = seqlen + 2
lowerCAmelCase__ , lowerCAmelCase__ : int = self.num_layers
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_attention_heads
lowerCAmelCase__ : Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase__ : Optional[Any] = common_inputs['''attention_mask'''].dtype
lowerCAmelCase__ : List[Any] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
lowerCAmelCase__ : List[Any] = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase )
]
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase__ : Tuple = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase__ : Optional[Any] = tokenizer.num_special_tokens_to_add(__UpperCAmelCase )
lowerCAmelCase__ : int = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase__ : Optional[int] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCAmelCase__ : str = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) )
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
elif self.task == "causal-lm":
lowerCAmelCase__ : List[str] = self._generate_dummy_inputs_for_causal_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
else:
lowerCAmelCase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Any = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
lowerCAmelCase__ : Dict = super(__UpperCAmelCase , self )._flatten_past_key_values_(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
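# A minimal, self-contained sketch of the past_key_values layout the methods above
# fill with torch.zeros: one (key, value) pair per layer, each tensor shaped
# (batch, num_heads, past_length, hidden_size // num_heads). Every dimension below
# is an assumed toy value, not taken from any real config.
import torch
batch, num_heads, past_length, hidden_size = 2, 4, 8, 64
past_key_values = [
    (
        torch.zeros(batch, num_heads, past_length, hidden_size // num_heads),  # key states
        torch.zeros(batch, num_heads, past_length, hidden_size // num_heads),  # value states
    )
    for _ in range(6)  # assumed number of decoder layers
]
print(past_key_values[0][0].shape)  # torch.Size([2, 4, 8, 16])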
| 678 | 1 |
def solution() -> int:
    # Project Euler 40: concatenate 1, 2, 3, ... into Champernowne's constant
    # and multiply the digits at positions 1, 10, 100, ..., 1_000_000.
    constant = []
    i = 1
    while len(constant) < 1e6:  # 1e6 numbers yield comfortably more than 1e6 digits
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
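# Expected output: 210 (the selected digits are 1, 1, 5, 3, 7, 2 and 1).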
if __name__ == "__main__":
print(solution())
| 678 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'sew-d'
def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase=2 , __UpperCAmelCase=512 , __UpperCAmelCase=256 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=("p2c", "c2p") , __UpperCAmelCase="layer_norm" , __UpperCAmelCase="gelu_python" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __UpperCAmelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : Optional[int] = feat_extract_norm
lowerCAmelCase__ : str = feat_extract_activation
lowerCAmelCase__ : int = list(__UpperCAmelCase )
lowerCAmelCase__ : int = list(__UpperCAmelCase )
lowerCAmelCase__ : Any = list(__UpperCAmelCase )
lowerCAmelCase__ : int = conv_bias
lowerCAmelCase__ : List[Any] = num_conv_pos_embeddings
lowerCAmelCase__ : Optional[int] = num_conv_pos_embedding_groups
lowerCAmelCase__ : int = len(self.conv_dim )
lowerCAmelCase__ : Union[str, Any] = num_hidden_layers
lowerCAmelCase__ : Any = intermediate_size
lowerCAmelCase__ : int = squeeze_factor
lowerCAmelCase__ : int = max_position_embeddings
lowerCAmelCase__ : Any = position_buckets
lowerCAmelCase__ : Optional[int] = share_att_key
lowerCAmelCase__ : Tuple = relative_attention
lowerCAmelCase__ : Optional[int] = norm_rel_ebd
lowerCAmelCase__ : Tuple = list(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = hidden_act
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : Optional[int] = hidden_dropout
lowerCAmelCase__ : Union[str, Any] = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : List[Any] = feat_proj_dropout
lowerCAmelCase__ : Any = final_dropout
lowerCAmelCase__ : Optional[int] = layer_norm_eps
lowerCAmelCase__ : List[str] = feature_layer_norm_eps
lowerCAmelCase__ : Tuple = initializer_range
lowerCAmelCase__ : Tuple = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Tuple = apply_spec_augment
lowerCAmelCase__ : List[str] = mask_time_prob
lowerCAmelCase__ : int = mask_time_length
lowerCAmelCase__ : int = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : int = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
# ctc loss
lowerCAmelCase__ : Optional[Any] = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# sequence classification
lowerCAmelCase__ : Tuple = use_weighted_layer_sum
lowerCAmelCase__ : Dict = classifier_proj_size
@property
def __magic_name__( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
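# With the default conv_stride above, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), this
# property returns 5 * 2**6 == 320: the overall downsampling factor of the
# convolutional feature extractor.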
| 678 | 1 |
def multiplicative_persistence(num: int) -> int:
    """
    Count how many times the digits of ``num`` must be multiplied together
    before a single digit remains.
    >>> multiplicative_persistence(217)
    2
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    """
    Count how many times the digits of ``num`` must be summed before a single
    digit remains.
    >>> additive_persistence(199)
    3
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
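# Worked example: 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4 gives multiplicative
# persistence 3, while 39 -> 3+9 = 12 -> 1+2 = 3 gives additive persistence 2.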
if __name__ == "__main__":
import doctest
doctest.testmod()
| 678 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( _lowercase , unittest.TestCase ):
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def __magic_name__( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ : Union[str, Any] = PegasusTokenizer(__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __magic_name__( self ):
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def __magic_name__( self , **__UpperCAmelCase ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase ):
return ("This is a test", "This is a test")
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = '''</s>'''
lowerCAmelCase__ : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(__UpperCAmelCase ) , 1103 )
def __magic_name__( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : int = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowerCAmelCase__ : Any = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
lowerCAmelCase__ : Dict = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Any = self._large_tokenizer
        # <mask_1> masks a whole sentence while <mask_2> masks a single word
lowerCAmelCase__ : List[str] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowerCAmelCase__ : Tuple = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase__ : Tuple = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
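        # Pegasus reserves the first 105 ids (pad, eos, the <mask_*> tokens and
        # fillers), so real SentencePiece pieces are shifted up by offset = 103.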
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase__ : str = '''To ensure a smooth flow of bank resolutions.'''
lowerCAmelCase__ : int = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase__ : List[Any] = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = ['''This is going to be way too long.''' * 150, '''short example''']
lowerCAmelCase__ : List[str] = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
lowerCAmelCase__ : Optional[int] = self._large_tokenizer(
text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def __magic_name__( self ):
# fmt: off
lowerCAmelCase__ : Optional[int] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( _lowercase , unittest.TestCase ):
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def __magic_name__( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ : List[Any] = PegasusTokenizer(__UpperCAmelCase , offset=0 , mask_token_sent=__UpperCAmelCase , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __magic_name__( self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def __magic_name__( self , **__UpperCAmelCase ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase ):
return ("This is a test", "This is a test")
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : str = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowerCAmelCase__ : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
lowerCAmelCase__ : int = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = ['''This is going to be way too long.''' * 1000, '''short example''']
lowerCAmelCase__ : int = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
lowerCAmelCase__ : Tuple = self._large_tokenizer(
text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask.
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowerCAmelCase__ : Union[str, Any] = self._large_tokenizer(__UpperCAmelCase ).input_ids
self.assertListEqual(
__UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 678 | 1 |
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    # Safe means no queen already placed in the same row, the same column, or
    # on either upper diagonal of (row, column).
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]], row: int) -> bool:
    # Backtracking: place one queen per row and recurse on the next row,
    # printing every complete placement that is reached.
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
| 678 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'donut-swin'
A__ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[3, 6, 12, 24] , __UpperCAmelCase=7 , __UpperCAmelCase=4.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = image_size
lowerCAmelCase__ : List[str] = patch_size
lowerCAmelCase__ : int = num_channels
lowerCAmelCase__ : Optional[Any] = embed_dim
lowerCAmelCase__ : int = depths
lowerCAmelCase__ : Dict = len(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = num_heads
lowerCAmelCase__ : Dict = window_size
lowerCAmelCase__ : str = mlp_ratio
lowerCAmelCase__ : Optional[int] = qkv_bias
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase__ : List[str] = drop_path_rate
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : List[str] = use_absolute_embeddings
lowerCAmelCase__ : Dict = layer_norm_eps
lowerCAmelCase__ : Any = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase__ : List[Any] = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
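        # With the defaults above (embed_dim=96, len(depths)=4 stages) this gives
        # 96 * 2**3 == 768.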
| 678 | 1 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( _lowercase , unittest.TestCase ):
A__ = GPTaTokenizer
A__ = GPTaTokenizerFast
A__ = True
A__ = {'add_prefix_space': True}
A__ = False
def __magic_name__( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase__ : Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
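        # "\u0120" is "Ġ", the byte-level BPE marker GPT-2 uses for a leading space.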
lowerCAmelCase__ : str = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ : Tuple = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCAmelCase__ : Tuple = {'''unk_token''': '''<unk>'''}
lowerCAmelCase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCAmelCase ) )
def __magic_name__( self , **__UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __magic_name__( self , **__UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase ):
lowerCAmelCase__ : Dict = '''lower newer'''
lowerCAmelCase__ : Tuple = '''lower newer'''
return input_text, output_text
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase__ : Any = '''lower newer'''
lowerCAmelCase__ : Tuple = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCAmelCase__ : Dict = tokenizer.tokenize(__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = tokens + [tokenizer.unk_token]
lowerCAmelCase__ : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__( self ):
if not self.test_rust_tokenizer:
return
lowerCAmelCase__ : Any = self.get_tokenizer()
lowerCAmelCase__ : Any = self.get_rust_tokenizer(add_prefix_space=__UpperCAmelCase )
lowerCAmelCase__ : int = '''lower newer'''
# Testing tokenization
lowerCAmelCase__ : List[str] = tokenizer.tokenize(__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
lowerCAmelCase__ : Dict = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
# Testing conversion to ids without special tokens
lowerCAmelCase__ : Optional[Any] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
# Testing conversion to ids with special tokens
lowerCAmelCase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=__UpperCAmelCase )
lowerCAmelCase__ : Dict = tokenizer.encode(__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
lowerCAmelCase__ : int = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
# Testing the unknown token
lowerCAmelCase__ : Tuple = tokens + [rust_tokenizer.unk_token]
lowerCAmelCase__ : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
        # It's very difficult to mix/test pretokenization with byte-level BPE and get
        # both GPT2 and Roberta to work at the same time (mostly an issue of adding a
        # space before the string).
pass
def __magic_name__( self , __UpperCAmelCase=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
# Simple input
lowerCAmelCase__ : Optional[Any] = '''This is a simple input'''
lowerCAmelCase__ : Any = ['''This is a simple input 1''', '''This is a simple input 2''']
lowerCAmelCase__ : int = ('''This is a simple input''', '''This is a pair''')
lowerCAmelCase__ : Dict = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(
__UpperCAmelCase , tokenizer_r.batch_encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' , )
# Pair input
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(
__UpperCAmelCase , tokenizer_r.batch_encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' , )
def __magic_name__( self ):
lowerCAmelCase__ : int = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
lowerCAmelCase__ : Any = '''This is a simple input'''
lowerCAmelCase__ : int = ['''This is a simple input looooooooong''', '''This is a simple input''']
lowerCAmelCase__ : List[str] = ('''This is a simple input''', '''This is a pair''')
lowerCAmelCase__ : int = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
lowerCAmelCase__ : Union[str, Any] = tokenizer.pad_token_id
lowerCAmelCase__ : Optional[int] = tokenizer(__UpperCAmelCase , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
lowerCAmelCase__ : Optional[Any] = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncate=__UpperCAmelCase , return_tensors='''np''' )
lowerCAmelCase__ : Dict = tokenizer(*__UpperCAmelCase , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
lowerCAmelCase__ : Tuple = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncate=__UpperCAmelCase , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def __magic_name__( self ):
lowerCAmelCase__ : str = '''$$$'''
lowerCAmelCase__ : Union[str, Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__UpperCAmelCase , add_bos_token=__UpperCAmelCase )
lowerCAmelCase__ : Any = '''This is a simple input'''
lowerCAmelCase__ : Any = ['''This is a simple input 1''', '''This is a simple input 2''']
lowerCAmelCase__ : List[str] = tokenizer.bos_token_id
lowerCAmelCase__ : str = tokenizer(__UpperCAmelCase )
lowerCAmelCase__ : str = tokenizer(__UpperCAmelCase )
self.assertEqual(out_s.input_ids[0] , __UpperCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCAmelCase__ : List[str] = tokenizer.decode(out_s.input_ids )
lowerCAmelCase__ : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __UpperCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __magic_name__( self ):
pass
def __magic_name__( self ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
lowerCAmelCase__ : Any = [self.get_tokenizer(do_lower_case=__UpperCAmelCase , add_bos_token=__UpperCAmelCase )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase__ : List[str] = '''Encode this.'''
lowerCAmelCase__ : Optional[Any] = '''This one too please.'''
lowerCAmelCase__ : List[str] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
encoded_sequence += tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ : List[str] = tokenizer.encode_plus(
__UpperCAmelCase , __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , )
lowerCAmelCase__ : List[str] = encoded_sequence_dict['''input_ids''']
lowerCAmelCase__ : Tuple = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
lowerCAmelCase__ : List[Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__UpperCAmelCase )
]
lowerCAmelCase__ : Optional[int] = [x for x in filtered_sequence if x is not None]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
lowerCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=__UpperCAmelCase )
lowerCAmelCase__ : List[str] = '''A photo of a cat'''
lowerCAmelCase__ : Optional[int] = tokenizer.encode(
__UpperCAmelCase , )
self.assertEqual(__UpperCAmelCase , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('''test_opt''' )
lowerCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained('''./test_opt''' )
lowerCAmelCase__ : Dict = tokenizer.encode(
__UpperCAmelCase , )
self.assertEqual(__UpperCAmelCase , [2, 250, 1345, 9, 10, 4758] )
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=__UpperCAmelCase )
lowerCAmelCase__ : int = '''A photo of a cat'''
lowerCAmelCase__ : Optional[int] = tokenizer.encode(
__UpperCAmelCase , )
# Same as above
self.assertEqual(__UpperCAmelCase , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=__UpperCAmelCase )
lowerCAmelCase__ : str = '''bos'''
lowerCAmelCase__ : Dict = tokenizer.get_vocab()['''bos''']
lowerCAmelCase__ : int = '''A photo of a cat'''
lowerCAmelCase__ : Optional[Any] = tokenizer.encode(
__UpperCAmelCase , )
# We changed the bos token
self.assertEqual(__UpperCAmelCase , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('''./tok''' )
lowerCAmelCase__ : Any = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
lowerCAmelCase__ : List[str] = tokenizer.encode(
__UpperCAmelCase , )
self.assertEqual(__UpperCAmelCase , [3_1957, 250, 1345, 9, 10, 4758] )
| 678 |
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor relative to the meter
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """
    Convert ``value`` between the metric length units listed above.
    >>> length_conversion(1, "meter", "kilometer")
    0.001
    >>> length_conversion(1, "kilometer", "meter")
    1000
    """
    # Normalise unit names: lowercase, drop trailing "s", map full names to symbols.
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    # Positive when converting to a smaller unit, negative otherwise.
    exponent = from_exponent - to_exponent
    return value * pow(10, exponent)
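# Note: .strip("s") removes all trailing "s" characters, so "meters" and "meterss"
# both normalise to "meter" before the symbol lookup.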
if __name__ == "__main__":
from doctest import testmod
testmod()
| 678 | 1 |
def depth_first_search(grid, row, col, visit) -> int:
    """
    Count the paths from the top-left to the bottom-right corner of a 0/1 grid
    (1 = blocked), moving in the four cardinal directions and never revisiting
    a cell on the current path.
    >>> depth_first_search([[0, 0], [0, 0]], 0, 0, set())
    2
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
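# Because every simple path is enumerated, the running time grows exponentially
# with the open area of the grid.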
if __name__ == "__main__":
import doctest
doctest.testmod()
| 678 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def __magic_name__( *__UpperCAmelCase , **__UpperCAmelCase ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : int = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
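        # No task string is passed here: the pipeline infers zero-shot image
        # classification from the checkpoint's metadata.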
lowerCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : List[str] = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__UpperCAmelCase ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
] , )
@require_tf
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
] , )
@slow
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCAmelCase__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
lowerCAmelCase__ : Tuple = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCAmelCase__ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : Union[str, Any] = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
lowerCAmelCase__ : Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
| 678 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
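# Standard lazy-import scaffolding: submodule contents are declared up front and
# only imported on first attribute access; torch-only classes are listed solely
# when torch is available.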
lowerCAmelCase_ = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 678 |
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent) -> bool:
    # Return True if the sink t is reachable from the source s in the residual
    # graph, recording the discovered path in `parent`.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph, source, sink):
    # Edmonds-Karp max flow; afterwards report every edge whose residual
    # capacity dropped to zero, which here yields a minimum source-sink cut.
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # record original capacities (copy)
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
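# Prints [(1, 3), (4, 3), (4, 5)] for test_graph: the saturated edges form a
# minimum 0-5 cut of capacity 12 + 7 + 4 = 23.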
| 678 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowerCAmelCase_ = None
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
lowerCAmelCase_ = {
"""albert-base-v1""": 5_12,
"""albert-large-v1""": 5_12,
"""albert-xlarge-v1""": 5_12,
"""albert-xxlarge-v1""": 5_12,
"""albert-base-v2""": 5_12,
"""albert-large-v2""": 5_12,
"""albert-xlarge-v2""": 5_12,
"""albert-xxlarge-v2""": 5_12,
}
lowerCAmelCase_ = """▁"""
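# "▁" (U+2581) marks the start of a word in SentencePiece tokenizations.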
class _lowerCAmelCase ( _lowercase ):
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = AlbertTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , **__UpperCAmelCase , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
lowerCAmelCase__ : str = (
AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase , normalized=__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase )
else mask_token
)
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ : Optional[int] = do_lower_case
lowerCAmelCase__ : int = remove_space
lowerCAmelCase__ : List[str] = keep_accents
lowerCAmelCase__ : Tuple = vocab_file
lowerCAmelCase__ : Optional[Any] = False if not self.vocab_file else True
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = None ):
lowerCAmelCase__ : Any = [self.sep_token_id]
lowerCAmelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = None ):
lowerCAmelCase__ : Union[str, Any] = [self.sep_token_id]
lowerCAmelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ : List[str] = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
return (out_vocab_file,)
| 678 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _lowerCAmelCase ( unittest.TestCase ):
A__ = MODEL_FOR_CAUSAL_LM_MAPPING
A__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
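        # sshleifer/tiny-ctrl is a tiny randomly initialized checkpoint, so the
        # deterministic continuations asserted below are gibberish by design.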
lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
__UpperCAmelCase , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
] , )
lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id
lowerCAmelCase__ : List[Any] = '''<pad>'''
lowerCAmelCase__ : List[Any] = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , )
self.assertEqual(
__UpperCAmelCase , [
[
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
],
[
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
],
] , )
@require_tf
def __magic_name__( self ):
lowerCAmelCase__ : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Dict = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
return text_generator, ["This is a test", "Another test"]
def __magic_name__( self ):
lowerCAmelCase__ : Any = '''Hello I believe in'''
lowerCAmelCase__ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ : Optional[int] = text_generator(__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , stop_sequence=''' fe''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = text_generator.model
lowerCAmelCase__ : Optional[int] = text_generator.tokenizer
lowerCAmelCase__ : Tuple = text_generator('''This is a test''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase )
lowerCAmelCase__ : Dict = text_generator('''This is a test''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCAmelCase__ : List[str] = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
] , )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Any = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : str = text_generator('''test''' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# Empty prompt is slightly special:
# it requires a BOS token to exist.
# Special case for Pegasus, which always appends an EOS token, so it
# works even without a BOS token.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCAmelCase__ : str = text_generator('''''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCAmelCase__ : List[str] = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, so we skip these tests for now.
return
# We don't care about models with an effectively infinite input range;
# they already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCAmelCase__ : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work when more new tokens are requested than the model's maximum length
with self.assertRaises(__UpperCAmelCase ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __magic_name__( self ):
import torch
# Classic `model_kwargs`
lowerCAmelCase__ : List[str] = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ : Any = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ : Union[str, Any] = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCAmelCase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowerCAmelCase__ : Any = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def __magic_name__( self ):
import torch
lowerCAmelCase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def __magic_name__( self ):
import torch
lowerCAmelCase__ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=__UpperCAmelCase , top_p=0.5 )
def __magic_name__( self ):
lowerCAmelCase__ : int = '''Hello world'''
lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
lowerCAmelCase__ : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' )
else:
lowerCAmelCase__ : Dict = logging.get_logger('''transformers.generation.utils''' )
lowerCAmelCase__ : Optional[Any] = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(__UpperCAmelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : Any = text_generator(__UpperCAmelCase , max_new_tokens=1 )
self.assertNotIn(__UpperCAmelCase , cl.out )
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : Union[str, Any] = text_generator(__UpperCAmelCase , max_length=10 )
self.assertNotIn(__UpperCAmelCase , cl.out )
| 678 | 1 |
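The tests above exercise the text-generation pipeline's output formats and its handling of max_length versus max_new_tokens. Below is a minimal usage sketch, not part of the test suite: the tiny checkpoint name is taken from the tests themselves, do_sample=False forces deterministic output, and the generated text from a random-weights model is meaningless.

from transformers import pipeline

generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
# Prefer max_new_tokens over max_length; passing both triggers the warning tested above.
outputs = generator("Hello I believe in", do_sample=False, max_new_tokens=5)
print(outputs[0]["generated_text"])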
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
lowerCAmelCase_ = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
lowerCAmelCase_ = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
lowerCAmelCase_ = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
lowerCAmelCase_ = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
lowerCAmelCase_ = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> int:
for tf_name, hf_name in patterns:
lowerCAmelCase__ : List[Any] = k.replace(UpperCamelCase , UpperCamelCase )
return k
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> BigBirdPegasusForConditionalGeneration:
lowerCAmelCase__ : List[str] = BigBirdPegasusConfig(**UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = BigBirdPegasusForConditionalGeneration(UpperCamelCase )
lowerCAmelCase__ : str = torch_model.state_dict()
lowerCAmelCase__ : Tuple = {}
# separating decoder weights
lowerCAmelCase__ : Any = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
lowerCAmelCase__ : Dict = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ):
lowerCAmelCase__ : List[Any] = [k.endswith(UpperCamelCase ) for ending in KEYS_TO_IGNORE]
if any(UpperCamelCase ):
continue
lowerCAmelCase__ : Tuple = DECODER_PATTERNS
lowerCAmelCase__ : List[Any] = rename_state_dict_key(UpperCamelCase , UpperCamelCase )
if new_k not in state_dict:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
lowerCAmelCase__ : Union[str, Any] = v.T
lowerCAmelCase__ : int = torch.from_numpy(UpperCamelCase )
assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ):
lowerCAmelCase__ : Tuple = [k.endswith(UpperCamelCase ) for ending in KEYS_TO_IGNORE]
if any(UpperCamelCase ):
continue
lowerCAmelCase__ : List[str] = REMAINING_PATTERNS
lowerCAmelCase__ : Union[str, Any] = rename_state_dict_key(UpperCamelCase , UpperCamelCase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
lowerCAmelCase__ : Optional[Any] = v.T
lowerCAmelCase__ : Union[str, Any] = torch.from_numpy(UpperCamelCase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
lowerCAmelCase__ : List[Any] = mapping['''model.embed_positions.weight''']
lowerCAmelCase__ : Tuple = mapping.pop('''model.embed_positions.weight''' )
lowerCAmelCase__ , lowerCAmelCase__ : Any = torch_model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
lowerCAmelCase__ : Optional[int] = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def __lowerCAmelCase ( UpperCamelCase ) -> Dict:
lowerCAmelCase__ : Any = tf.train.list_variables(UpperCamelCase )
lowerCAmelCase__ : Dict = {}
lowerCAmelCase__ : Union[str, Any] = ['''global_step''']
for name, shape in tqdm(UpperCamelCase , desc='''converting tf checkpoint to dict''' ):
lowerCAmelCase__ : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
lowerCAmelCase__ : Tuple = tf.train.load_variable(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : List[str] = array
return tf_weights
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
lowerCAmelCase__ : Union[str, Any] = get_tf_weights_as_numpy(UpperCamelCase )
lowerCAmelCase__ : Optional[int] = convert_bigbird_pegasus(UpperCamelCase , UpperCamelCase )
torch_model.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 678 |
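The conversion script above renames TensorFlow checkpoint keys by applying each (tf_name, hf_name) pair as an ordered substring replacement. Here is a small self-contained sketch of that idea, using a shortened, hypothetical pattern list rather than the full tables from the script:

# Each pattern is applied in order as a plain substring replacement.
patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight")]

def rename_key(key: str) -> str:
    for tf_name, hf_name in patterns:
        key = key.replace(tf_name, hf_name)
    return key

print(rename_key("encoder/layer_0/kernel"))  # -> encoder.layers.0.weight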
def __lowerCAmelCase ( UpperCamelCase ) -> str:
return "".join([hex(UpperCamelCase )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase )] )
def __lowerCAmelCase ( UpperCamelCase ) -> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(UpperCamelCase ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCamelCase ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn the pair into an integer.
# Then join the resulting integers together into bytes, and return them.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 678 | 1 |
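Since the two Base16 helpers above are both obfuscated to __lowerCAmelCase, here is a readable restatement with function names of our choosing, plus a round-trip check:

def base16_encode(data: bytes) -> str:
    # Two uppercase hex digits per byte, per RFC 3548 section 6.
    return "".join(hex(byte)[2:].zfill(2).upper() for byte in data)

def base16_decode(data: str) -> bytes:
    if len(data) % 2 != 0:
        raise ValueError("Base16 data must have an even number of hex digits.")
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError("Base16 data must be uppercase hex.")
    return bytes(int(data[i : i + 2], 16) for i in range(0, len(data), 2))

assert base16_decode(base16_encode(b"Hello")) == b"Hello"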
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
lowerCAmelCase_ = trt.Logger(trt.Logger.WARNING)
lowerCAmelCase_ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
lowerCAmelCase_ = logging.getLogger(__name__)
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=3_84,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=1_28,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
lowerCAmelCase_ = parser.parse_args()
if args.tokenizer_name:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name."""
)
logger.info("""Training/evaluation parameters %s""", args)
lowerCAmelCase_ = args.per_device_eval_batch_size
lowerCAmelCase_ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
lowerCAmelCase_ = True
lowerCAmelCase_ = """temp_engine/bert-fp32.engine"""
if args.fpaa:
lowerCAmelCase_ = """temp_engine/bert-fp16.engine"""
if args.inta:
lowerCAmelCase_ = """temp_engine/bert-int8.engine"""
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
lowerCAmelCase_ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
lowerCAmelCase_ = [network.get_input(i) for i in range(network.num_inputs)]
lowerCAmelCase_ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
lowerCAmelCase_ = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
lowerCAmelCase_ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
lowerCAmelCase_ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
lowerCAmelCase__ : str = np.asarray(inputs['''input_ids'''] , dtype=np.intaa )
lowerCAmelCase__ : Union[str, Any] = np.asarray(inputs['''attention_mask'''] , dtype=np.intaa )
lowerCAmelCase__ : Optional[Any] = np.asarray(inputs['''token_type_ids'''] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , UpperCamelCase )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , UpperCamelCase )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , UpperCamelCase )
# start time
lowerCAmelCase__ : str = time.time()
# Run inference
context.execute_async(
bindings=[int(UpperCamelCase ) for d_inp in d_inputs] + [int(UpperCamelCase ), int(UpperCamelCase )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(UpperCamelCase , UpperCamelCase , UpperCamelCase )
cuda.memcpy_dtoh_async(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowerCAmelCase__ : Union[str, Any] = time.time()
lowerCAmelCase__ : List[Any] = end_time - start_time
lowerCAmelCase__ : Optional[int] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
lowerCAmelCase_ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCAmelCase_ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
lowerCAmelCase_ = raw_datasets["""validation"""].column_names
lowerCAmelCase_ = """question""" if """question""" in column_names else column_names[0]
lowerCAmelCase_ = """context""" if """context""" in column_names else column_names[1]
lowerCAmelCase_ = """answers""" if """answers""" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
lowerCAmelCase_ = tokenizer.padding_side == """right"""
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
lowerCAmelCase_ = min(args.max_seq_length, tokenizer.model_max_length)
def __lowerCAmelCase ( UpperCamelCase ) -> int:
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace.
lowerCAmelCase__ : Tuple = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit with the context of the previous feature.
lowerCAmelCase__ : List[str] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=UpperCamelCase , stride=args.doc_stride , return_overflowing_tokens=UpperCamelCase , return_offsets_mapping=UpperCamelCase , padding='''max_length''' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowerCAmelCase__ : int = tokenized_examples.pop('''overflow_to_sample_mapping''' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowerCAmelCase__ : Any = []
for i in range(len(tokenized_examples['''input_ids'''] ) ):
# Grab the sequence corresponding to that example (to know what the context is and what the question is).
lowerCAmelCase__ : Optional[int] = tokenized_examples.sequence_ids(UpperCamelCase )
lowerCAmelCase__ : str = 1 if pad_on_right else 0
# One example can give several spans; this is the index of the example containing this span of text.
lowerCAmelCase__ : str = sample_mapping[i]
tokenized_examples["example_id"].append(examples['''id'''][sample_index] )
# Set the offset_mapping entries that are not part of the context to None, so it's easy to determine
# whether a token position is part of the context or not.
lowerCAmelCase__ : Optional[Any] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] )
]
return tokenized_examples
lowerCAmelCase_ = raw_datasets["""validation"""]
# Validation Feature Creation
lowerCAmelCase_ = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="""Running tokenizer on validation dataset""",
)
lowerCAmelCase_ = default_data_collator
lowerCAmelCase_ = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""])
lowerCAmelCase_ = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase="eval" ) -> Optional[int]:
# Post-processing: we match the start logits and end logits to answers in the original context.
lowerCAmelCase__ : List[str] = postprocess_qa_predictions(
examples=UpperCamelCase , features=UpperCamelCase , predictions=UpperCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=UpperCamelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCAmelCase__ : List[Any] = [
{'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
]
else:
lowerCAmelCase__ : Optional[int] = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
lowerCAmelCase__ : List[str] = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=UpperCamelCase , label_ids=UpperCamelCase )
lowerCAmelCase_ = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def __lowerCAmelCase ( UpperCamelCase ) -> Dict:
return trt.volume(engine.get_binding_shape(UpperCamelCase ) ) * engine.get_binding_dtype(UpperCamelCase ).itemsize
# Allocate device memory for inputs and outputs.
lowerCAmelCase_ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
lowerCAmelCase_ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
lowerCAmelCase_ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
lowerCAmelCase_ = cuda.mem_alloc(h_outputa.nbytes)
lowerCAmelCase_ = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
lowerCAmelCase_ = cuda.Stream()
# Evaluation
logger.info("""***** Running Evaluation *****""")
logger.info(F""" Num examples = {len(eval_dataset)}""")
logger.info(F""" Batch size = {args.per_device_eval_batch_size}""")
lowerCAmelCase_ = 0.0
lowerCAmelCase_ = 0
lowerCAmelCase_ = timeit.default_timer()
lowerCAmelCase_ = None
for step, batch in enumerate(eval_dataloader):
lowerCAmelCase_ , lowerCAmelCase_ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
lowerCAmelCase_ , lowerCAmelCase_ = outputs
lowerCAmelCase_ = torch.tensor(start_logits)
lowerCAmelCase_ = torch.tensor(end_logits)
# necessary to pad predictions and labels so they can be gathered
lowerCAmelCase_ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00)
lowerCAmelCase_ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00)
lowerCAmelCase_ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
lowerCAmelCase_ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00)
if all_preds is not None:
lowerCAmelCase_ = nested_truncate(all_preds, len(eval_dataset))
lowerCAmelCase_ = timeit.default_timer() - start_time
logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 10_00 / niter))
logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 10_00))
logger.info("""Total Number of Inference = %d""", niter)
lowerCAmelCase_ = post_processing_function(eval_examples, eval_dataset, all_preds)
lowerCAmelCase_ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"""Evaluation metrics: {eval_metric}""")
| 678 |
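The core preprocessing trick in the script above is doc-stride chunking: a long context is split into overlapping features, and overflow_to_sample_mapping ties each feature back to its source example. A minimal sketch follows, assuming any fast tokenizer; "bert-base-uncased" is just an example checkpoint, not one the script prescribes.

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bert-base-uncased")
enc = tok(
    "What is X?",
    "some very long context " * 200,
    truncation="only_second",  # only truncate the context, never the question
    max_length=64,
    stride=16,  # overlap between consecutive chunks
    return_overflowing_tokens=True,
    return_offsets_mapping=True,
)
print(len(enc["input_ids"]), enc["overflow_to_sample_mapping"][:3])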
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _lowerCAmelCase ( _lowercase ):
A__ = (DPMSolverSDEScheduler,)
A__ = 10
def __magic_name__( self , **__UpperCAmelCase ):
lowerCAmelCase__ : Dict = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**__UpperCAmelCase )
return config
def __magic_name__( self ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase )
def __magic_name__( self ):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase )
def __magic_name__( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__UpperCAmelCase )
def __magic_name__( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = self.scheduler_classes[0]
lowerCAmelCase__ : str = self.get_scheduler_config()
lowerCAmelCase__ : Optional[Any] = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Union[str, Any] = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Optional[Any] = sample.to(__UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : int = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[str] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[str] = output.prev_sample
lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Optional[Any] = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Tuple = sample.to(__UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : int = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : int = output.prev_sample
lowerCAmelCase__ : Any = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase )
lowerCAmelCase__ : Dict = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = output.prev_sample
lowerCAmelCase__ : Dict = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(__UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
lowerCAmelCase__ : Dict = self.get_scheduler_config()
lowerCAmelCase__ : Optional[int] = scheduler_class(**__UpperCAmelCase , use_karras_sigmas=__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma
lowerCAmelCase__ : Union[str, Any] = sample.to(__UpperCAmelCase )
for t in scheduler.timesteps:
lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Dict = output.prev_sample
lowerCAmelCase__ : int = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
| 678 | 1 |
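Every test above repeats the same generic denoising loop: scale the model input, predict, then step. Here is a minimal sketch of that loop, assuming torchsde is installed (DPMSolverSDEScheduler requires it) and using a stand-in zero predictor instead of a real UNet:

import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
model = lambda x, t: torch.zeros_like(x)  # hypothetical noise predictor

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample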
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase_ = logging.getLogger(__name__)
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Any:
return (preds == labels).mean()
@dataclass
class _lowerCAmelCase :
A__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
A__ = field(
default=_lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A__ = field(
default=_lowercase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
A__ = field(
default=_lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class _lowerCAmelCase :
A__ = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
A__ = field(metadata={'help': 'Should contain the data files for the task.'} )
A__ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A__ = field(
default=_lowercase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def __lowerCAmelCase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCAmelCase__ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , UpperCamelCase )
# Set seed
set_seed(training_args.seed )
try:
lowerCAmelCase__ : List[Any] = processors[data_args.task_name]()
lowerCAmelCase__ : Dict = processor.get_labels()
lowerCAmelCase__ : List[str] = len(UpperCamelCase )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase__ : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
lowerCAmelCase__ : int = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCAmelCase__ : Any = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
lowerCAmelCase__ : Tuple = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowerCAmelCase__ : Optional[Any] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(UpperCamelCase ) -> Dict:
lowerCAmelCase__ : str = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(UpperCamelCase , p.label_ids )}
# Data collator
lowerCAmelCase__ : List[Any] = DataCollatorWithPadding(UpperCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowerCAmelCase__ : int = Trainer(
model=UpperCamelCase , args=UpperCamelCase , train_dataset=UpperCamelCase , eval_dataset=UpperCamelCase , compute_metrics=UpperCamelCase , data_collator=UpperCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCAmelCase__ : Dict = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCAmelCase__ : Union[str, Any] = trainer.evaluate()
lowerCAmelCase__ : int = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
with open(UpperCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , UpperCamelCase , UpperCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
results.update(UpperCamelCase )
return results
def __lowerCAmelCase ( UpperCamelCase ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 678 |
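The simple_accuracy helper defined near the top of the script above reduces to a NumPy mean over elementwise equality; a quick check:

import numpy as np

preds = np.array([0, 1, 2, 1])
labels = np.array([0, 1, 1, 1])
print((preds == labels).mean())  # 0.75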
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self , __UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = 3
lowerCAmelCase__ : Tuple = 250
lowerCAmelCase__ : List[Any] = ids_tensor((batch_size, length) , __UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = torch.ones((batch_size, length) , device=__UpperCAmelCase , dtype=torch.float ) / length
return input_ids, scores
def __magic_name__( self ):
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 )
lowerCAmelCase__ : List[str] = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(9 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(10 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = MaxLengthCriteria(max_length=10 )
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self._get_tensors(5 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(9 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(10 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(5 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(9 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(10 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ : int = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def __magic_name__( self ):
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 )
lowerCAmelCase__ : Any = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ : int = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(__UpperCAmelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
lowerCAmelCase__ : List[str] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(__UpperCAmelCase ) , 1 )
| 678 | 1 |
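A stopping criterion, as exercised above, is just a callable over (input_ids, scores) that reports whether generation should stop. A minimal custom criterion in the same spirit (the class name and threshold here are ours, not from the tests):

import torch
from transformers import StoppingCriteria

class StopOnLength(StoppingCriteria):
    def __init__(self, max_length: int):
        self.max_length = max_length

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        # Stop once the sequence reaches max_length tokens.
        return input_ids.shape[-1] >= self.max_length

criterion = StopOnLength(10)
print(criterion(torch.ones(3, 5, dtype=torch.long), None))   # False
print(criterion(torch.ones(3, 10, dtype=torch.long), None))  # True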
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
"""configuration_xlm_roberta""": [
"""XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaConfig""",
"""XLMRobertaOnnxConfig""",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""XLMRobertaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""XLMRobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaForCausalLM""",
"""XLMRobertaForMaskedLM""",
"""XLMRobertaForMultipleChoice""",
"""XLMRobertaForQuestionAnswering""",
"""XLMRobertaForSequenceClassification""",
"""XLMRobertaForTokenClassification""",
"""XLMRobertaModel""",
"""XLMRobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMRobertaForCausalLM""",
"""TFXLMRobertaForMaskedLM""",
"""TFXLMRobertaForMultipleChoice""",
"""TFXLMRobertaForQuestionAnswering""",
"""TFXLMRobertaForSequenceClassification""",
"""TFXLMRobertaForTokenClassification""",
"""TFXLMRobertaModel""",
"""TFXLMRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxXLMRobertaForMaskedLM""",
"""FlaxXLMRobertaForCausalLM""",
"""FlaxXLMRobertaForMultipleChoice""",
"""FlaxXLMRobertaForQuestionAnswering""",
"""FlaxXLMRobertaForSequenceClassification""",
"""FlaxXLMRobertaForTokenClassification""",
"""FlaxXLMRobertaModel""",
"""FlaxXLMRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 678 |
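The snippet above follows the standard lazy-import pattern used throughout transformers: submodules are declared in _import_structure and are only imported on first attribute access. A stripped-down illustration of what such an __init__.py reduces to (one entry only, purely for illustration):

# inside a package's __init__.py
import sys
from transformers.utils import _LazyModule

_import_structure = {"configuration_xlm_roberta": ["XLMRobertaConfig"]}
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)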
from functools import reduce
lowerCAmelCase_ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def __lowerCAmelCase ( UpperCamelCase = N ) -> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda UpperCamelCase , UpperCamelCase : str(int(UpperCamelCase ) * int(UpperCamelCase ) ) , n[i : i + 13] ) )
for i in range(len(UpperCamelCase ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 678 | 1 |
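An equivalent plain-loop version of the reduce-based solution above, finding the largest product of 13 adjacent digits; easier to step through than the one-liner:

def largest_product(digits: str, span: int = 13) -> int:
    best = 0
    for i in range(len(digits) - span + 1):
        prod = 1
        for d in digits[i : i + span]:
            prod *= int(d)
        best = max(best, prod)
    return best

assert largest_product("123456", span=2) == 30  # 5 * 6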
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'conditional_detr'
A__ = ['past_key_values']
A__ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=3 , __UpperCAmelCase=300 , __UpperCAmelCase=6 , __UpperCAmelCase=2048 , __UpperCAmelCase=8 , __UpperCAmelCase=6 , __UpperCAmelCase=2048 , __UpperCAmelCase=8 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1.0 , __UpperCAmelCase=False , __UpperCAmelCase="sine" , __UpperCAmelCase="resnet50" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=0.25 , **__UpperCAmelCase , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowerCAmelCase__ : List[str] = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = backbone_config.get('''model_type''' )
lowerCAmelCase__ : str = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase__ : List[str] = config_class.from_dict(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = use_timm_backbone
lowerCAmelCase__ : Optional[int] = backbone_config
lowerCAmelCase__ : Union[str, Any] = num_channels
lowerCAmelCase__ : int = num_queries
lowerCAmelCase__ : Any = d_model
lowerCAmelCase__ : Any = encoder_ffn_dim
lowerCAmelCase__ : Optional[int] = encoder_layers
lowerCAmelCase__ : Tuple = encoder_attention_heads
lowerCAmelCase__ : Optional[Any] = decoder_ffn_dim
lowerCAmelCase__ : Dict = decoder_layers
lowerCAmelCase__ : List[Any] = decoder_attention_heads
lowerCAmelCase__ : Dict = dropout
lowerCAmelCase__ : List[Any] = attention_dropout
lowerCAmelCase__ : Any = activation_dropout
lowerCAmelCase__ : Union[str, Any] = activation_function
lowerCAmelCase__ : str = init_std
lowerCAmelCase__ : List[str] = init_xavier_std
lowerCAmelCase__ : Optional[Any] = encoder_layerdrop
lowerCAmelCase__ : Any = decoder_layerdrop
lowerCAmelCase__ : str = encoder_layers
lowerCAmelCase__ : int = auxiliary_loss
lowerCAmelCase__ : Any = position_embedding_type
lowerCAmelCase__ : Optional[int] = backbone
lowerCAmelCase__ : Optional[int] = use_pretrained_backbone
lowerCAmelCase__ : Tuple = dilation
# Hungarian matcher
lowerCAmelCase__ : Any = class_cost
lowerCAmelCase__ : Tuple = bbox_cost
lowerCAmelCase__ : Dict = giou_cost
# Loss coefficients
lowerCAmelCase__ : Dict = mask_loss_coefficient
lowerCAmelCase__ : Union[str, Any] = dice_loss_coefficient
lowerCAmelCase__ : Tuple = cls_loss_coefficient
lowerCAmelCase__ : Dict = bbox_loss_coefficient
lowerCAmelCase__ : List[Any] = giou_loss_coefficient
lowerCAmelCase__ : int = focal_alpha
super().__init__(is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase )
@property
def __magic_name__( self ):
return self.encoder_attention_heads
@property
def __magic_name__( self ):
return self.d_model
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCAmelCase__ : Optional[Any] = self.backbone_config.to_dict()
lowerCAmelCase__ : str = self.__class__.model_type
return output
class _lowerCAmelCase ( _lowercase ):
A__ = version.parse('1.11' )
@property
def __magic_name__( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __magic_name__( self ):
return 1e-5
@property
def __magic_name__( self ):
return 12
| 678 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
def __lowerCAmelCase ( UpperCamelCase ) -> List[str]:
lowerCAmelCase__ : int = R'''\w+[.]\d+'''
lowerCAmelCase__ : Tuple = re.findall(UpperCamelCase , UpperCamelCase )
for pat in pats:
lowerCAmelCase__ : List[str] = key.replace(UpperCamelCase , '''_'''.join(pat.split('''.''' ) ) )
return key
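# Illustration of the renaming above: r"\w+[.]\d+" matches numbered segments
# such as "blocks.0", so a key like "transformer.blocks.0.weight" becomes
# "transformer.blocks_0.weight" - the attribute naming Flax uses for
# submodules created in a loop.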
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
lowerCAmelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowerCAmelCase__ : List[str] = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowerCAmelCase__ : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
lowerCAmelCase__ : str = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCAmelCase__ : Any = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
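# Note on the reshapes above: PyTorch conv weights are laid out as
# (out_channels, in_channels, h, w) while Flax expects (h, w, in_channels,
# out_channels), hence transpose(2, 3, 1, 0); dense layers only need a plain
# transpose because a Flax kernel is the transpose of a PyTorch weight.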
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=42 ) -> Any:
# Step 1: Convert pytorch tensor to numpy
lowerCAmelCase__ : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowerCAmelCase__ : Tuple = flax_model.init_weights(PRNGKey(UpperCamelCase ) )
lowerCAmelCase__ : Any = flatten_dict(UpperCamelCase )
lowerCAmelCase__ : List[Any] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase__ : str = rename_key(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = rename_key_and_reshape_tensor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
lowerCAmelCase__ : List[str] = jnp.asarray(UpperCamelCase )
return unflatten_dict(UpperCamelCase )
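# Hedged usage sketch (object names are placeholders; upstream this converter
# is known as convert_pytorch_state_dict_to_flax and returns an unflattened
# Flax params dict):
#
#   flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)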
| 678 | 1 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCAmelCase__ : Tuple = 1.5
lowerCAmelCase__ : Tuple = int(factor * num_class_images )
lowerCAmelCase__ : List[Any] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=UpperCamelCase , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=UpperCamelCase )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
lowerCAmelCase__ : int = client.query(text=UpperCamelCase )
if len(UpperCamelCase ) >= factor * num_class_images or num_images > 1E4:
break
else:
lowerCAmelCase__ : Tuple = int(factor * num_images )
lowerCAmelCase__ : Tuple = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=UpperCamelCase , aesthetic_weight=0.1 , )
lowerCAmelCase__ : List[str] = 0
lowerCAmelCase__ : Optional[Any] = 0
lowerCAmelCase__ : Optional[Any] = tqdm(desc='''downloading real regularization images''' , total=UpperCamelCase )
with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open(
F"""{class_data_dir}/images.txt""" , '''w''' ) as fa:
while total < num_class_images:
lowerCAmelCase__ : List[str] = class_images[count]
count += 1
try:
lowerCAmelCase__ : str = requests.get(images['''url'''] )
if img.status_code == 200:
lowerCAmelCase__ : List[str] = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def __lowerCAmelCase ( ) -> List[Any]:
lowerCAmelCase__ : Union[str, Any] = argparse.ArgumentParser('''''' , add_help=UpperCamelCase )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=UpperCamelCase , type=UpperCamelCase )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=UpperCamelCase , type=UpperCamelCase )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=UpperCamelCase )
return parser.parse_args()
if __name__ == "__main__":
lowerCAmelCase_ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
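# Example invocation (script name and paths are illustrative; the flags match
# the parser above):
#
#   python retrieve.py --class_prompt "photo of a dog" \
#       --class_data_dir ./real_reg/dog --num_class_images 200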
| 678 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
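# With the _LazyModule indirection above, importing the package stays cheap
# and the torch-backed classes are only resolved on first attribute access;
# a hedged sketch:
#
#   from transformers import NezhaConfig, NezhaModel  # torch imported lazily
#   model = NezhaModel(NezhaConfig())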
| 678 | 1 |
import warnings
from .generation import TFGenerationMixin
class _lowerCAmelCase ( _lowercase ):
# warning at import time
warnings.warn(
'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.' , _lowercase , )
| 678 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowerCAmelCase_ = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
@classmethod
def __magic_name__( cls ):
lowerCAmelCase__ : Dict = TOKEN
HfFolder.save_token(__UpperCAmelCase )
@classmethod
def __magic_name__( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCAmelCase__ : List[Any] = FlaxBertModel(__UpperCAmelCase )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
lowerCAmelCase__ : List[Any] = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
lowerCAmelCase__ : Union[str, Any] = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ : List[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ : str = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__UpperCAmelCase , repo_id='''test-model-flax''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCAmelCase__ : Optional[Any] = FlaxBertModel(__UpperCAmelCase )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
lowerCAmelCase__ : str = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ : Dict = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__UpperCAmelCase , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" )
def check_models_equal(model_1, model_2) -> bool:
    # two Flax models are considered equal when every flattened parameter
    # tensor matches to within an absolute sum of 1e-4
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCAmelCase__ : List[str] = FlaxBertModel(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCAmelCase__ : Union[str, Any] = FlaxBertModel(__UpperCAmelCase )
lowerCAmelCase__ : Dict = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , max_shard_size='''10KB''' )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Any = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = '''bert'''
lowerCAmelCase__ : int = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Dict = FlaxBertModel.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = '''bert'''
lowerCAmelCase__ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
| 678 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> np.array:
lowerCAmelCase__ : List[str] = F"""{sampling_rate}"""
lowerCAmelCase__ : List[Any] = '''1'''
lowerCAmelCase__ : Any = '''f32le'''
lowerCAmelCase__ : Dict = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(UpperCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
lowerCAmelCase__ : Tuple = ffmpeg_process.communicate(UpperCamelCase )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
lowerCAmelCase__ : Optional[int] = output_stream[0]
lowerCAmelCase__ : Tuple = np.frombuffer(UpperCamelCase , np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
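# Hedged usage sketch (file name illustrative; upstream this helper is known
# as ffmpeg_read): decode any container ffmpeg understands into mono float32
# PCM at the requested rate:
#
#   with open("sample.flac", "rb") as f:
#       audio = ffmpeg_read(f.read(), 16_000)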
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase = "f32le" , ) -> Tuple:
lowerCAmelCase__ : Tuple = F"""{sampling_rate}"""
lowerCAmelCase__ : Tuple = '''1'''
if format_for_conversion == "s16le":
lowerCAmelCase__ : List[Any] = 2
elif format_for_conversion == "f32le":
lowerCAmelCase__ : Dict = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
lowerCAmelCase__ : str = platform.system()
if system == "Linux":
lowerCAmelCase__ : Dict = '''alsa'''
lowerCAmelCase__ : Optional[Any] = '''default'''
elif system == "Darwin":
lowerCAmelCase__ : Optional[Any] = '''avfoundation'''
lowerCAmelCase__ : List[Any] = ''':0'''
elif system == "Windows":
lowerCAmelCase__ : Any = '''dshow'''
lowerCAmelCase__ : Optional[int] = '''default'''
lowerCAmelCase__ : int = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
lowerCAmelCase__ : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowerCAmelCase__ : int = _ffmpeg_stream(UpperCamelCase , UpperCamelCase )
for item in iterator:
yield item
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = "f32le" , ) -> List[str]:
if stream_chunk_s is not None:
lowerCAmelCase__ : Union[str, Any] = stream_chunk_s
else:
lowerCAmelCase__ : Optional[Any] = chunk_length_s
lowerCAmelCase__ : Dict = ffmpeg_microphone(UpperCamelCase , UpperCamelCase , format_for_conversion=UpperCamelCase )
if format_for_conversion == "s16le":
lowerCAmelCase__ : str = np.intaa
lowerCAmelCase__ : List[str] = 2
elif format_for_conversion == "f32le":
lowerCAmelCase__ : Any = np.floataa
lowerCAmelCase__ : List[str] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
lowerCAmelCase__ : Dict = chunk_length_s / 6
lowerCAmelCase__ : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(UpperCamelCase , (int, float) ):
lowerCAmelCase__ : Any = [stride_length_s, stride_length_s]
lowerCAmelCase__ : Dict = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowerCAmelCase__ : Tuple = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowerCAmelCase__ : Tuple = datetime.datetime.now()
lowerCAmelCase__ : Union[str, Any] = datetime.timedelta(seconds=UpperCamelCase )
for item in chunk_bytes_iter(UpperCamelCase , UpperCamelCase , stride=(stride_left, stride_right) , stream=UpperCamelCase ):
# Put everything back in numpy scale
lowerCAmelCase__ : List[str] = np.frombuffer(item['''raw'''] , dtype=UpperCamelCase )
lowerCAmelCase__ : str = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
lowerCAmelCase__ : Union[str, Any] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = False ) -> Tuple:
lowerCAmelCase__ : Tuple = b''''''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
lowerCAmelCase__ : Optional[int] = 0
for raw in iterator:
acc += raw
if stream and len(UpperCamelCase ) < chunk_len:
lowerCAmelCase__ : Tuple = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(UpperCamelCase ) >= chunk_len:
# We are flushing the accumulator
lowerCAmelCase__ : List[Any] = (_stride_left, stride_right)
lowerCAmelCase__ : Dict = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
lowerCAmelCase__ : List[str] = False
yield item
lowerCAmelCase__ : List[Any] = stride_left
lowerCAmelCase__ : Union[str, Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(UpperCamelCase ) > stride_left:
lowerCAmelCase__ : Any = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
lowerCAmelCase__ : Dict = False
yield item
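# Concrete illustration of the striding above: with chunk_len=6 and
# stride=(2, 2), a 10-byte stream yields acc[0:6] with stride (0, 2),
# acc[2:8] and acc[4:10] with stride (2, 2), and a final partial acc[6:10]
# with stride (2, 0), so consumers can drop the overlapped stride regions.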
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> str:
lowerCAmelCase__ : Optional[Any] = 2**24 # 16MB
try:
with subprocess.Popen(UpperCamelCase , stdout=subprocess.PIPE , bufsize=UpperCamelCase ) as ffmpeg_process:
while True:
lowerCAmelCase__ : str = ffmpeg_process.stdout.read(UpperCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
| 678 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        # move a random pivot to the end, partition, then recurse on both sides
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
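# Sanity-check context for the driver below: randomized quicksort performs
# about 2 * n * ln(n) comparisons in expectation, i.e. roughly 900 for
# n = 100, so the printed count should typically land in that neighbourhood.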
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    """No of Comparisons for 100 elements selected from a standard normal """
    """distribution is :"""
)
print(z)
| 678 | 1 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> List[Any]:
lowerCAmelCase__ : str = {
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bza_file, BzipaExtractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lza_file, LzaExtractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
lowerCAmelCase__ , lowerCAmelCase__ : str = input_paths_and_base_extractors[compression_format]
if input_path is None:
lowerCAmelCase__ : List[Any] = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(UpperCamelCase )
assert base_extractor.is_extractable(UpperCamelCase )
lowerCAmelCase__ : int = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
base_extractor.extract(UpperCamelCase , UpperCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowerCAmelCase__ : Optional[int] = file_path.read_text(encoding='''utf-8''' )
else:
lowerCAmelCase__ : int = output_path.read_text(encoding='''utf-8''' )
lowerCAmelCase__ : str = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> Optional[Any]:
lowerCAmelCase__ : List[str] = {
'''7z''': seven_zip_file,
'''bz2''': bza_file,
'''gzip''': gz_file,
'''lz4''': lza_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
lowerCAmelCase__ : Optional[int] = input_paths[compression_format]
if input_path is None:
lowerCAmelCase__ : str = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = Extractor.infer_extractor_format(UpperCamelCase )
assert extractor_format is not None
lowerCAmelCase__ : Optional[int] = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
Extractor.extract(UpperCamelCase , UpperCamelCase , UpperCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowerCAmelCase__ : Union[str, Any] = file_path.read_text(encoding='''utf-8''' )
else:
lowerCAmelCase__ : Optional[int] = output_path.read_text(encoding='''utf-8''' )
lowerCAmelCase__ : Optional[Any] = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple:
import tarfile
lowerCAmelCase__ : List[str] = tmp_path / '''data_dot_dot'''
directory.mkdir()
lowerCAmelCase__ : Optional[Any] = directory / '''tar_file_with_dot_dot.tar'''
with tarfile.TarFile(UpperCamelCase , '''w''' ) as f:
f.add(UpperCamelCase , arcname=os.path.join('''..''' , text_file.name ) )
return path
@pytest.fixture
def __lowerCAmelCase ( UpperCamelCase ) -> List[str]:
import tarfile
lowerCAmelCase__ : Optional[Any] = tmp_path / '''data_sym_link'''
directory.mkdir()
lowerCAmelCase__ : List[Any] = directory / '''tar_file_with_sym_link.tar'''
os.symlink('''..''' , directory / '''subdir''' , target_is_directory=UpperCamelCase )
with tarfile.TarFile(UpperCamelCase , '''w''' ) as f:
f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]:
lowerCAmelCase__ : str = {
'''tar_file_with_dot_dot''': tar_file_with_dot_dot,
'''tar_file_with_sym_link''': tar_file_with_sym_link,
}
lowerCAmelCase__ : Any = insecure_tar_files[insecure_tar_file]
lowerCAmelCase__ : str = tmp_path / '''extracted'''
TarExtractor.extract(UpperCamelCase , UpperCamelCase )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def __lowerCAmelCase ( UpperCamelCase ) -> Any:
# We should have fewer false positives than zipfile.is_zipfile
# We do that by checking only the magic number
lowerCAmelCase__ : str = tmpdir / '''not_a_zip_file'''
# From: https://github.com/python/cpython/pull/5053
lowerCAmelCase__ : Dict = (
b'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
b'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
b'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
b'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
)
with not_a_zip_file.open('''wb''' ) as f:
f.write(UpperCamelCase )
assert zipfile.is_zipfile(str(UpperCamelCase ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(UpperCamelCase ) # but we're right
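# Background for the check above: the extractor tests only the leading zip
# magic bytes (e.g. b"PK\x03\x04"), while zipfile.is_zipfile scans the whole
# file for an end-of-central-directory record, which the crafted PNG payload
# happens to contain.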
| 678 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]:
assert isinstance(UpperCamelCase , UpperCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCAmelCase__ : List[str] = tmp_path / '''cache'''
lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase__ : List[Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCAmelCase__ : str = tmp_path / '''cache'''
lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : str = features.copy() if features else default_expected_features
lowerCAmelCase__ : List[Any] = (
Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
lowerCAmelCase__ : str = tmp_path / '''cache'''
lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , split=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
if issubclass(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : Any = parquet_path
elif issubclass(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : Any = [parquet_path]
lowerCAmelCase__ : int = tmp_path / '''cache'''
lowerCAmelCase__ : str = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=("train",) ) -> str:
assert isinstance(UpperCamelCase , UpperCamelCase )
for split in splits:
lowerCAmelCase__ : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]:
lowerCAmelCase__ : Any = tmp_path / '''cache'''
lowerCAmelCase__ : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase__ : Optional[Any] = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read()
_check_parquet_datasetdict(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
lowerCAmelCase__ : Any = tmp_path / '''cache'''
lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : Tuple = features.copy() if features else default_expected_features
lowerCAmelCase__ : Optional[int] = (
Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase__ : List[str] = ParquetDatasetReader({'''train''': parquet_path} , features=UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_datasetdict(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
if split:
lowerCAmelCase__ : Tuple = {split: parquet_path}
else:
lowerCAmelCase__ : int = '''train'''
lowerCAmelCase__ : List[Any] = {'''train''': parquet_path, '''test''': parquet_path}
lowerCAmelCase__ : Optional[int] = tmp_path / '''cache'''
lowerCAmelCase__ : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : List[str] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_datasetdict(UpperCamelCase , UpperCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCAmelCase__ : Optional[Any] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
lowerCAmelCase__ : Union[str, Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' )
lowerCAmelCase__ : int = pf.read()
assert dataset.data.table == output_table
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCAmelCase__ : List[str] = str(shared_datadir / '''test_image_rgb.jpg''' )
lowerCAmelCase__ : Dict = {'''image''': [image_path]}
lowerCAmelCase__ : int = Features({'''image''': Image()} )
lowerCAmelCase__ : Dict = Dataset.from_dict(UpperCamelCase , features=UpperCamelCase )
lowerCAmelCase__ : List[str] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
lowerCAmelCase__ : Dict = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
lowerCAmelCase__ : int = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=UpperCamelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Any:
assert get_writer_batch_size(UpperCamelCase ) == expected
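# The parametrization above mirrors how get_writer_batch_size behaves: plain
# feature sets return None (keep the writer default), while Image and Audio
# features return the smaller config-defined row-group sizes so that large
# binary cells do not produce oversized parquet row groups.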
| 678 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=False ) -> Dict:
lowerCAmelCase__ : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> List[Any]:
for i in range(config.num_hidden_layers ):
lowerCAmelCase__ : List[str] = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ : int = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
lowerCAmelCase__ : Any = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ : str = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ : Union[str, Any] = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ : int = in_proj_bias[-config.hidden_size :]
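# The slicing above splits timm's fused qkv projection, whose weight has
# shape (3 * hidden_size, hidden_size), into separate query / key / value
# parameters of shape (hidden_size, hidden_size) each, in that order.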
def __lowerCAmelCase ( UpperCamelCase ) -> Union[str, Any]:
lowerCAmelCase__ : Any = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCAmelCase__ : str = dct.pop(UpperCamelCase )
lowerCAmelCase__ : Any = val
@torch.no_grad()
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCAmelCase__ : str = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=UpperCamelCase )
lowerCAmelCase__ : List[Any] = False
lowerCAmelCase__ : str = False
lowerCAmelCase__ : str = False
lowerCAmelCase__ : Dict = False
if "vqa" in checkpoint_url:
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : Any = 3129
lowerCAmelCase__ : Union[str, Any] = '''huggingface/label-files'''
lowerCAmelCase__ : str = '''vqa2-id2label.json'''
lowerCAmelCase__ : int = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase__ : Optional[Any] = {int(UpperCamelCase ): v for k, v in idalabel.items()}
lowerCAmelCase__ : Optional[Any] = idalabel
lowerCAmelCase__ : List[str] = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ : str = ViltForQuestionAnswering(UpperCamelCase )
elif "nlvr" in checkpoint_url:
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : List[Any] = 2
lowerCAmelCase__ : Optional[Any] = {0: '''False''', 1: '''True'''}
lowerCAmelCase__ : Union[str, Any] = {v: k for k, v in config.idalabel.items()}
lowerCAmelCase__ : Optional[Any] = 3
lowerCAmelCase__ : Tuple = ViltForImagesAndTextClassification(UpperCamelCase )
elif "irtr" in checkpoint_url:
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Tuple = ViltForImageAndTextRetrieval(UpperCamelCase )
elif "mlm_itm" in checkpoint_url:
lowerCAmelCase__ : int = True
lowerCAmelCase__ : str = ViltForMaskedLM(UpperCamelCase )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
lowerCAmelCase__ : List[Any] = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location='''cpu''' )['''state_dict''']
lowerCAmelCase__ : Optional[int] = create_rename_keys(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
read_in_q_k_v(UpperCamelCase , UpperCamelCase )
if mlm_model or irtr_model:
lowerCAmelCase__ : List[str] = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowerCAmelCase__ , lowerCAmelCase__ : Dict = model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(UpperCamelCase )
# Define processor
lowerCAmelCase__ : str = ViltImageProcessor(size=384 )
lowerCAmelCase__ : Any = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowerCAmelCase__ : int = ViltProcessor(UpperCamelCase , UpperCamelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowerCAmelCase__ : Union[str, Any] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=UpperCamelCase ).raw )
lowerCAmelCase__ : List[str] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=UpperCamelCase ).raw )
lowerCAmelCase__ : List[Any] = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
lowerCAmelCase__ : Optional[int] = processor(UpperCamelCase , UpperCamelCase , return_tensors='''pt''' )
lowerCAmelCase__ : int = processor(UpperCamelCase , UpperCamelCase , return_tensors='''pt''' )
lowerCAmelCase__ : Tuple = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowerCAmelCase__ : List[Any] = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=UpperCamelCase ).raw )
if mlm_model:
lowerCAmelCase__ : Any = '''a bunch of [MASK] laying on a [MASK].'''
else:
lowerCAmelCase__ : Any = '''How many cats are there?'''
lowerCAmelCase__ : Optional[Any] = processor(UpperCamelCase , UpperCamelCase , return_tensors='''pt''' )
lowerCAmelCase__ : Optional[int] = model(**UpperCamelCase )
# Verify outputs
if mlm_model:
lowerCAmelCase__ : Optional[Any] = torch.Size([1, 11, 30522] )
lowerCAmelCase__ : Optional[int] = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , UpperCamelCase , atol=1E-4 )
# verify masked token prediction equals "cats"
lowerCAmelCase__ : List[str] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowerCAmelCase__ : Optional[int] = torch.Size([1, 3129] )
lowerCAmelCase__ : Optional[int] = torch.tensor([-15.9495, -18.1472, -10.3041] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1E-4 )
# verify vqa prediction equals "2"
lowerCAmelCase__ : Tuple = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowerCAmelCase__ : Any = torch.Size([1, 2] )
lowerCAmelCase__ : List[str] = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCAmelCase_ = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
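# Example invocation (script and output names are illustrative; the default
# checkpoint URL is the one registered above):
#
#   python convert_vilt_original_to_pytorch.py \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm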
| 678 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class _lowerCAmelCase ( _lowercase , _lowercase ):
A__ = 'focalnet'
def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=False , __UpperCAmelCase=[192, 384, 768, 768] , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[2, 2, 2, 2] , __UpperCAmelCase=[3, 3, 3, 3] , __UpperCAmelCase="gelu" , __UpperCAmelCase=4.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=False , __UpperCAmelCase=1e-4 , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Any = image_size
lowerCAmelCase__ : Any = patch_size
lowerCAmelCase__ : Tuple = num_channels
lowerCAmelCase__ : Dict = embed_dim
lowerCAmelCase__ : Optional[int] = use_conv_embed
lowerCAmelCase__ : Optional[int] = hidden_sizes
lowerCAmelCase__ : Optional[Any] = depths
lowerCAmelCase__ : Dict = focal_levels
lowerCAmelCase__ : int = focal_windows
lowerCAmelCase__ : Optional[Any] = hidden_act
lowerCAmelCase__ : Optional[int] = mlp_ratio
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : List[Any] = drop_path_rate
lowerCAmelCase__ : Tuple = use_layerscale
lowerCAmelCase__ : List[Any] = layerscale_value
lowerCAmelCase__ : Dict = use_post_layernorm
lowerCAmelCase__ : Dict = use_post_layernorm_in_modulation
lowerCAmelCase__ : Dict = normalize_modulator
lowerCAmelCase__ : Union[str, Any] = initializer_range
lowerCAmelCase__ : List[str] = layer_norm_eps
lowerCAmelCase__ : Tuple = encoder_stride
lowerCAmelCase__ : Dict = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
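# Hedged usage sketch: out_features are validated against the stage_names
# computed above and mirrored into out_indices, e.g.
#
#   config = FocalNetConfig(out_features=["stage2", "stage4"])
#   assert config.out_indices == [2, 4]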
| 678 | 1 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a word's sorted-letter signature; anagrams share a signature."""
    return "".join(sorted(word))
def anagram(my_word: str) -> list[str]:
    """Return all known words with the same signature as my_word."""
    return word_by_signature[signature(my_word)]
lowerCAmelCase_ = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""")
lowerCAmelCase_ = sorted({word.strip().lower() for word in data.splitlines()})
lowerCAmelCase_ = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
lowerCAmelCase_ = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
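# For example, signature("pots") == "opst" == signature("stop"), so
# anagram("pots") returns every dictionary word sharing that signature,
# e.g. ["opts", "post", "pots", "spot", "stop", "tops"] depending on the
# contents of words.txt.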
| 678 |
from scipy.stats import pearsonr
import datasets
lowerCAmelCase_ = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
lowerCAmelCase_ = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
lowerCAmelCase_ = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def __magic_name__( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
if return_pvalue:
lowerCAmelCase__ : Union[str, Any] = pearsonr(__UpperCAmelCase , __UpperCAmelCase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__UpperCAmelCase , __UpperCAmelCase )[0] )}
| 678 | 1 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCAmelCase_ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __lowerCAmelCase ( UpperCamelCase ) -> Dict:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
return max(metric_fn(UpperCamelCase , UpperCamelCase ) for gt in ground_truths )
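# Hedged illustration (added; not part of the original script): the helper above
# awards the best score a prediction achieves against ANY acceptable reference
# answer. `_toy_exact_match` is a hypothetical stand-in for exact_match_score.
def _toy_exact_match(prediction, ground_truth):
    return float(prediction.strip().lower() == ground_truth.strip().lower())

assert max(_toy_exact_match("Paris", gt) for gt in ["paris", "Paris, France"]) == 1.0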
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCAmelCase__ : Union[str, Any] = [line.strip() for line in open(UpperCamelCase , '''r''' ).readlines()]
lowerCAmelCase__ : Tuple = []
if args.gold_data_mode == "qa":
lowerCAmelCase__ : Union[str, Any] = pd.read_csv(UpperCamelCase , sep='''\t''' , header=UpperCamelCase )
for answer_list in data[1]:
lowerCAmelCase__ : Any = ast.literal_eval(UpperCamelCase )
answers.append(UpperCamelCase )
else:
lowerCAmelCase__ : Optional[int] = [line.strip() for line in open(UpperCamelCase , '''r''' ).readlines()]
lowerCAmelCase__ : str = [[reference] for reference in references]
lowerCAmelCase__ : List[Any] = 0
for prediction, ground_truths in zip(UpperCamelCase , UpperCamelCase ):
total += 1
em += metric_max_over_ground_truths(UpperCamelCase , UpperCamelCase , UpperCamelCase )
fa += metric_max_over_ground_truths(UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = 100.0 * em / total
lowerCAmelCase__ : Dict = 100.0 * fa / total
logger.info(F"""F1: {fa:.2f}""" )
logger.info(F"""EM: {em:.2f}""" )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCAmelCase__ : str = args.k
lowerCAmelCase__ : str = [line.strip() for line in open(UpperCamelCase , '''r''' ).readlines()]
lowerCAmelCase__ : str = [line.strip() for line in open(UpperCamelCase , '''r''' ).readlines()]
lowerCAmelCase__ : Optional[int] = 0
for hypo, reference in zip(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : List[str] = set(hypo.split('''\t''' )[:k] )
lowerCAmelCase__ : int = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
lowerCAmelCase__ : List[str] = 100.0 * em / total
logger.info(F"""Precision@{k}: {em: .2f}""" )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any:
def strip_title(UpperCamelCase ):
if title.startswith('''"''' ):
lowerCAmelCase__ : List[Any] = title[1:]
if title.endswith('''"''' ):
lowerCAmelCase__ : str = title[:-1]
return title
lowerCAmelCase__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
UpperCamelCase , return_tensors='''pt''' , padding=UpperCamelCase , truncation=UpperCamelCase , )['''input_ids'''].to(args.device )
lowerCAmelCase__ : str = rag_model.rag.question_encoder(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = question_enc_outputs[0]
lowerCAmelCase__ : Dict = rag_model.retriever(
UpperCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
lowerCAmelCase__ : Union[str, Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
lowerCAmelCase__ : str = []
for docs in all_docs:
lowerCAmelCase__ : str = [strip_title(UpperCamelCase ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(UpperCamelCase ) )
return provenance_strings
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
with torch.no_grad():
lowerCAmelCase__ : Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
UpperCamelCase , return_tensors='''pt''' , padding=UpperCamelCase , truncation=UpperCamelCase )
lowerCAmelCase__ : int = inputs_dict.input_ids.to(args.device )
lowerCAmelCase__ : str = inputs_dict.attention_mask.to(args.device )
lowerCAmelCase__ : Any = rag_model.generate( # rag_model overwrites generate
UpperCamelCase , attention_mask=UpperCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=UpperCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
lowerCAmelCase__ : Any = rag_model.retriever.generator_tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )
if args.print_predictions:
for q, a in zip(UpperCamelCase , UpperCamelCase ):
logger.info('''Q: {} - A: {}'''.format(UpperCamelCase , UpperCamelCase ) )
return answers
def __lowerCAmelCase ( ) -> List[Any]:
lowerCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=UpperCamelCase , help=(
'''RAG model type: rag_sequence, rag_token or bart. If none is specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=UpperCamelCase , choices=['''exact''', '''compressed''', '''legacy'''] , type=UpperCamelCase , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=UpperCamelCase , type=UpperCamelCase , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=UpperCamelCase , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=UpperCamelCase , type=UpperCamelCase , required=UpperCamelCase , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=UpperCamelCase , help=(
'''Evaluation mode: e2e calculates exact match and F1 of the downstream task; retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=UpperCamelCase , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=UpperCamelCase , type=UpperCamelCase , required=UpperCamelCase , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=UpperCamelCase , type=UpperCamelCase , required=UpperCamelCase , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=UpperCamelCase , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file. '''
'''qa - a single line in the following format: question [tab] answer_list. '''
'''ans - a single line of the gold file contains the expected answer string.'''
) , )
parser.add_argument(
'''--predictions_path''' , type=UpperCamelCase , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=UpperCamelCase , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=UpperCamelCase , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=UpperCamelCase , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=UpperCamelCase , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
lowerCAmelCase__ : str = parser.parse_args()
lowerCAmelCase__ : str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def __lowerCAmelCase ( UpperCamelCase ) -> Dict:
lowerCAmelCase__ : int = {}
if args.model_type is None:
lowerCAmelCase__ : Tuple = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
lowerCAmelCase__ : int = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
lowerCAmelCase__ : str = args.n_docs
if args.index_name is not None:
lowerCAmelCase__ : str = args.index_name
if args.index_path is not None:
lowerCAmelCase__ : int = args.index_path
else:
lowerCAmelCase__ : Dict = BartForConditionalGeneration
lowerCAmelCase__ : Optional[Any] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , UpperCamelCase )
lowerCAmelCase__ : str = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
lowerCAmelCase__ : List[Any] = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(UpperCamelCase , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(UpperCamelCase ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
lowerCAmelCase__ : Optional[int] = RagRetriever.from_pretrained(UpperCamelCase , **UpperCamelCase )
lowerCAmelCase__ : List[Any] = model_class.from_pretrained(UpperCamelCase , retriever=UpperCamelCase , **UpperCamelCase )
model.retriever.init_retrieval()
else:
lowerCAmelCase__ : List[str] = model_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
lowerCAmelCase__ : List[Any] = []
for line in tqdm(UpperCamelCase ):
questions.append(line.strip() )
if len(UpperCamelCase ) == args.eval_batch_size:
lowerCAmelCase__ : List[str] = evaluate_batch_fn(UpperCamelCase , UpperCamelCase , UpperCamelCase )
preds_file.write('''\n'''.join(UpperCamelCase ) + '''\n''' )
preds_file.flush()
lowerCAmelCase__ : Optional[int] = []
if len(UpperCamelCase ) > 0:
lowerCAmelCase__ : int = evaluate_batch_fn(UpperCamelCase , UpperCamelCase , UpperCamelCase )
preds_file.write('''\n'''.join(UpperCamelCase ) )
preds_file.flush()
score_fn(UpperCamelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCAmelCase_ = get_args()
main(args)
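# Hedged usage sketch (added): a typical end-to-end invocation might look like the
# following; the script filename and all paths are placeholders, and the model id
# is just an example of a RAG checkpoint on the Hub.
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --eval_mode e2e --gold_data_mode qa \
#       --evaluation_set path/to/questions.txt \
#       --gold_data_path path/to/gold.tsv \
#       --predictions_path path/to/predictions.txt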
| 678 |
from manim import *
class _lowerCAmelCase ( _lowercase ):
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase__ : Dict = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : List[str] = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : int = Text('''CPU''' , font_size=24 )
lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = [mem.copy() for i in range(4 )]
lowerCAmelCase__ : Tuple = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Tuple = Text('''GPU''' , font_size=24 )
lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Tuple = Text('''Model''' , font_size=24 )
lowerCAmelCase__ : List[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : Optional[Any] = []
for i, rect in enumerate(__UpperCAmelCase ):
rect.set_stroke(__UpperCAmelCase )
lowerCAmelCase__ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
self.add(__UpperCAmelCase )
model_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase )
lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Any = Text('''Loaded Checkpoint''' , font_size=24 )
lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : str = []
for i, rect in enumerate(__UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
target.move_to(__UpperCAmelCase )
ckpt_arr.append(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase__ : List[Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[str] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : str = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
lowerCAmelCase__ : Optional[Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase__ : Dict = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : str = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : List[str] = Text('''Disk''' , font_size=24 )
lowerCAmelCase__ : Any = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
lowerCAmelCase__ : str = []
for i, rect in enumerate(__UpperCAmelCase ):
lowerCAmelCase__ : Dict = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
self.play(*__UpperCAmelCase )
self.play(FadeOut(__UpperCAmelCase ) )
lowerCAmelCase__ : int = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) )
self.play(
FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , )
self.wait()
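# Hedged usage note (added): a Manim scene like this is usually rendered from the
# command line, e.g. `manim -pql this_file.py SceneName`; the file and scene names
# here are placeholders since the original class name is obfuscated above.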
| 678 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class _lowerCAmelCase :
A__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
A__ = field(
default=_lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A__ = field(
default='NER' , metadata={'help': 'Task type to fine-tune in training (e.g. NER, POS, etc.)'} )
A__ = field(
default=_lowercase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
A__ = field(default=_lowercase , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
A__ = field(
default=_lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class _lowerCAmelCase :
A__ = field(
metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} )
A__ = field(
default=_lowercase , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , )
A__ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A__ = field(
default=_lowercase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def __lowerCAmelCase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCAmelCase__ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' Use --overwrite_output_dir to overwrite it.''' )
lowerCAmelCase__ : int = import_module('''tasks''' )
try:
lowerCAmelCase__ : Dict = getattr(UpperCamelCase , model_args.task_type )
lowerCAmelCase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , UpperCamelCase )
# Set seed
set_seed(training_args.seed )
# Prepare the CoNLL-2003 task
lowerCAmelCase__ : Optional[int] = token_classification_task.get_labels(data_args.labels )
lowerCAmelCase__ : Dict[int, str] = dict(enumerate(UpperCamelCase ) )
lowerCAmelCase__ : str = len(UpperCamelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase__ : List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCamelCase , idalabel=UpperCamelCase , labelaid={label: i for i, label in enumerate(UpperCamelCase )} , cache_dir=model_args.cache_dir , )
lowerCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
lowerCAmelCase__ : int = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
lowerCAmelCase__ : Tuple = (
TokenClassificationDataset(
token_classification_task=UpperCamelCase , data_dir=data_args.data_dir , tokenizer=UpperCamelCase , labels=UpperCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowerCAmelCase__ : Optional[int] = (
TokenClassificationDataset(
token_classification_task=UpperCamelCase , data_dir=data_args.data_dir , tokenizer=UpperCamelCase , labels=UpperCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(UpperCamelCase , UpperCamelCase ) -> Tuple[List[int], List[int]]:
lowerCAmelCase__ : int = np.argmax(UpperCamelCase , axis=2 )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = preds.shape
lowerCAmelCase__ : List[Any] = [[] for _ in range(UpperCamelCase )]
lowerCAmelCase__ : Tuple = [[] for _ in range(UpperCamelCase )]
for i in range(UpperCamelCase ):
for j in range(UpperCamelCase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(UpperCamelCase ) -> Dict:
lowerCAmelCase__ , lowerCAmelCase__ : Dict = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(UpperCamelCase , UpperCamelCase ),
"precision": precision_score(UpperCamelCase , UpperCamelCase ),
"recall": recall_score(UpperCamelCase , UpperCamelCase ),
"f1": fa_score(UpperCamelCase , UpperCamelCase ),
}
# Data collator
lowerCAmelCase__ : Optional[Any] = DataCollatorWithPadding(UpperCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowerCAmelCase__ : Dict = Trainer(
model=UpperCamelCase , args=UpperCamelCase , train_dataset=UpperCamelCase , eval_dataset=UpperCamelCase , compute_metrics=UpperCamelCase , data_collator=UpperCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCAmelCase__ : List[str] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCAmelCase__ : List[str] = trainer.evaluate()
lowerCAmelCase__ : str = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(UpperCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , UpperCamelCase , UpperCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
results.update(UpperCamelCase )
# Predict
if training_args.do_predict:
lowerCAmelCase__ : Any = TokenClassificationDataset(
token_classification_task=UpperCamelCase , data_dir=data_args.data_dir , tokenizer=UpperCamelCase , labels=UpperCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = trainer.predict(UpperCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = align_predictions(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(UpperCamelCase , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , UpperCamelCase , UpperCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
lowerCAmelCase__ : Dict = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(UpperCamelCase , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return results
def __lowerCAmelCase ( UpperCamelCase ) -> Optional[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
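# Hedged usage sketch (added): every option above is parsed by HfArgumentParser,
# so a minimal run might look like the following (script name, paths and model id
# are placeholders, not values taken from this file):
#   python run_ner.py --data_dir ./conll2003 --labels ./labels.txt \
#       --model_name_or_path bert-base-cased --output_dir ./out \
#       --max_seq_length 128 --do_train --do_eval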
| 678 |
import collections
import os
import re
from pathlib import Path
lowerCAmelCase_ = """src/transformers"""
# Matches is_xxx_available()
lowerCAmelCase_ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase_ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-value pattern: "bla": ["foo", "bar"]
lowerCAmelCase_ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCAmelCase_ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase_ = re.compile(R"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase_ = re.compile(R"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCAmelCase_ = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowerCAmelCase_ = re.compile(R"""^\s*else:""")
def __lowerCAmelCase ( UpperCamelCase ) -> int:
if _re_test_backend.search(UpperCamelCase ) is None:
return None
lowerCAmelCase__ : int = [b[0] for b in _re_backend.findall(UpperCamelCase )]
backends.sort()
return "_and_".join(UpperCamelCase )
def __lowerCAmelCase ( UpperCamelCase ) -> Any:
with open(UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCAmelCase__ : Union[str, Any] = f.readlines()
lowerCAmelCase__ : Tuple = 0
while line_index < len(UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(UpperCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase__ : List[str] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase__ : str = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(UpperCamelCase ):
lowerCAmelCase__ : str = _re_one_line_import_struct.search(UpperCamelCase ).groups()[0]
lowerCAmelCase__ : Optional[Any] = re.findall(R'''\[([^\]]+)\]''' , UpperCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
lowerCAmelCase__ : Tuple = _re_import_struct_key_value.search(UpperCamelCase )
if single_line_import_search is not None:
lowerCAmelCase__ : Optional[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(UpperCamelCase ) > 0]
objects.extend(UpperCamelCase )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase__ : Any = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase__ : List[str] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ : Union[str, Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ : Optional[int] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
lowerCAmelCase__ : str = lines[line_index]
if _re_import_struct_add_one.search(UpperCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(UpperCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(UpperCamelCase ) is not None:
lowerCAmelCase__ : Optional[int] = _re_import_struct_add_many.search(UpperCamelCase ).groups()[0].split(''', ''' )
lowerCAmelCase__ : List[Any] = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0]
objects.extend(UpperCamelCase )
elif _re_between_brackets.search(UpperCamelCase ) is not None:
lowerCAmelCase__ : List[Any] = _re_between_brackets.search(UpperCamelCase ).groups()[0].split(''', ''' )
lowerCAmelCase__ : Optional[int] = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0]
objects.extend(UpperCamelCase )
elif _re_quote_object.search(UpperCamelCase ) is not None:
objects.append(_re_quote_object.search(UpperCamelCase ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
lowerCAmelCase__ : Optional[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase__ : Any = []
while (
line_index < len(UpperCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
lowerCAmelCase__ : Tuple = lines[line_index]
lowerCAmelCase__ : List[Any] = _re_import.search(UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase__ : Dict = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(UpperCamelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase__ : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ : Tuple = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
lowerCAmelCase__ : Any = lines[line_index]
lowerCAmelCase__ : Union[str, Any] = _re_import.search(UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCAmelCase__ : str = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
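# (Added note, hedged.) The parser above returns two parallel
# {backend: [object, ...]} mappings — one collected from the _import_structure
# block, one from the TYPE_CHECKING block — which the checker below diffs
# against each other.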
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> List[Any]:
def find_duplicates(UpperCamelCase ):
return [k for k, v in collections.Counter(UpperCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowerCAmelCase__ : Optional[Any] = []
for key in import_dict_objects.keys():
lowerCAmelCase__ : str = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
lowerCAmelCase__ : int = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowerCAmelCase__ : List[Any] = '''base imports''' if key == '''none''' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
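# Hedged worked example (added; not part of the original checker): the duplicate
# check above is a plain Counter scan — an object registered twice shows up with
# a count greater than one and is reported.
import collections  # already imported at the top of this file
assert [k for k, v in collections.Counter(["A", "B", "A"]).items() if v > 1] == ["A"]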
def __lowerCAmelCase ( ) -> Optional[Any]:
lowerCAmelCase__ : Dict = []
for root, _, files in os.walk(UpperCamelCase ):
if "__init__.py" in files:
lowerCAmelCase__ : Any = os.path.join(UpperCamelCase , '''__init__.py''' )
lowerCAmelCase__ : List[Any] = parse_init(UpperCamelCase )
if objects is not None:
lowerCAmelCase__ : Optional[int] = analyze_results(*UpperCamelCase )
if len(UpperCamelCase ) > 0:
lowerCAmelCase__ : Tuple = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(UpperCamelCase ) )
if len(UpperCamelCase ) > 0:
raise ValueError('''\n\n'''.join(UpperCamelCase ) )
def __lowerCAmelCase ( ) -> Tuple:
lowerCAmelCase__ : str = []
for path, directories, files in os.walk(UpperCamelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(UpperCamelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(UpperCamelCase ) / folder).glob('''*.py''' ) ) ) == 0:
continue
lowerCAmelCase__ : Tuple = str((Path(UpperCamelCase ) / folder).relative_to(UpperCamelCase ) )
lowerCAmelCase__ : Dict = short_path.replace(os.path.sep , '''.''' )
submodules.append(UpperCamelCase )
for fname in files:
if fname == "__init__.py":
continue
lowerCAmelCase__ : Union[str, Any] = str((Path(UpperCamelCase ) / fname).relative_to(UpperCamelCase ) )
lowerCAmelCase__ : Tuple = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(UpperCamelCase )
return submodules
lowerCAmelCase_ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def __lowerCAmelCase ( ) -> List[str]:
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
lowerCAmelCase__ : Dict = direct_transformers_import(UpperCamelCase )
lowerCAmelCase__ : int = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
# (potentially re-)add them.
with open(os.path.join(UpperCamelCase , '''__init__.py''' ) , '''r''' ) as f:
lowerCAmelCase__ : str = f.read()
import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , UpperCamelCase ) ) )
lowerCAmelCase__ : Optional[int] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(UpperCamelCase ) > 0:
lowerCAmelCase__ : List[Any] = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
F"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 678 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class _lowerCAmelCase ( _lowercase ):
A__ = 42
@flax_register_to_config
class _lowerCAmelCase ( nn.Module , _lowercase , _lowercase ):
A__ = 32
A__ = 4
A__ = 4
A__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
A__ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
A__ = False
A__ = (320, 640, 1280, 1280)
A__ = 2
A__ = 8
A__ = None
A__ = 1280
A__ = 0.0
A__ = False
A__ = jnp.floataa
A__ = True
A__ = 0
A__ = False
def __magic_name__( self , __UpperCAmelCase ):
# init input tensors
lowerCAmelCase__ : Any = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCAmelCase__ : Tuple = jnp.zeros(__UpperCAmelCase , dtype=jnp.floataa )
lowerCAmelCase__ : Dict = jnp.ones((1,) , dtype=jnp.intaa )
lowerCAmelCase__ : str = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCAmelCase__ , lowerCAmelCase__ : Any = jax.random.split(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )["params"]
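# (Added note, hedged.) The dummy sample/timestep/encoder tensors built above are
# only used to trace shapes; a caller would typically do something like
#   params = model.init_weights(jax.random.PRNGKey(0))
# where `model` is an instance of this module and the variable names are assumptions.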
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = self.block_out_channels
lowerCAmelCase__ : Optional[Any] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCAmelCase__ : Dict = self.num_attention_heads or self.attention_head_dim
# input
lowerCAmelCase__ : Union[str, Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCAmelCase__ : Any = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCAmelCase__ : Tuple = FlaxTimestepEmbedding(__UpperCAmelCase , dtype=self.dtype )
lowerCAmelCase__ : Optional[int] = self.only_cross_attention
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : List[str] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Tuple = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : Dict = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
lowerCAmelCase__ : List[str] = output_channel
lowerCAmelCase__ : int = block_out_channels[i]
lowerCAmelCase__ : int = i == len(__UpperCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCAmelCase__ : List[Any] = FlaxCrossAttnDownBlockaD(
in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCAmelCase__ : Tuple = FlaxDownBlockaD(
in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = down_blocks
# mid
lowerCAmelCase__ : Any = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : Union[str, Any] = list(reversed(__UpperCAmelCase ) )
lowerCAmelCase__ : str = list(reversed(__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[int] = list(reversed(__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[int] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
lowerCAmelCase__ : List[Any] = output_channel
lowerCAmelCase__ : str = reversed_block_out_channels[i]
lowerCAmelCase__ : int = reversed_block_out_channels[min(i + 1 , len(__UpperCAmelCase ) - 1 )]
lowerCAmelCase__ : Tuple = i == len(__UpperCAmelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
lowerCAmelCase__ : Optional[int] = FlaxCrossAttnUpBlockaD(
in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , prev_output_channel=__UpperCAmelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCAmelCase__ : int = FlaxUpBlockaD(
in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , prev_output_channel=__UpperCAmelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(__UpperCAmelCase )
lowerCAmelCase__ : str = output_channel
lowerCAmelCase__ : List[str] = up_blocks
# out
lowerCAmelCase__ : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
lowerCAmelCase__ : Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase = True , __UpperCAmelCase = False , ):
# 1. time
if not isinstance(__UpperCAmelCase , jnp.ndarray ):
lowerCAmelCase__ : List[Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__UpperCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCAmelCase__ : int = timesteps.astype(dtype=jnp.floataa )
lowerCAmelCase__ : Any = jnp.expand_dims(__UpperCAmelCase , 0 )
lowerCAmelCase__ : Tuple = self.time_proj(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = self.time_embedding(__UpperCAmelCase )
# 2. pre-process
lowerCAmelCase__ : List[Any] = jnp.transpose(__UpperCAmelCase , (0, 2, 3, 1) )
lowerCAmelCase__ : List[Any] = self.conv_in(__UpperCAmelCase )
# 3. down
lowerCAmelCase__ : Union[str, Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ , lowerCAmelCase__ : int = down_block(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , deterministic=not train )
else:
lowerCAmelCase__ , lowerCAmelCase__ : int = down_block(__UpperCAmelCase , __UpperCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
lowerCAmelCase__ : Dict = ()
for down_block_res_sample, down_block_additional_residual in zip(
__UpperCAmelCase , __UpperCAmelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
lowerCAmelCase__ : str = new_down_block_res_samples
# 4. mid
lowerCAmelCase__ : Dict = self.mid_block(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
lowerCAmelCase__ : Tuple = down_block_res_samples[-(self.layers_per_block + 1) :]
lowerCAmelCase__ : Any = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = up_block(
__UpperCAmelCase , temb=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , res_hidden_states_tuple=__UpperCAmelCase , deterministic=not train , )
else:
lowerCAmelCase__ : Any = up_block(__UpperCAmelCase , temb=__UpperCAmelCase , res_hidden_states_tuple=__UpperCAmelCase , deterministic=not train )
# 6. post-process
lowerCAmelCase__ : Dict = self.conv_norm_out(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = nn.silu(__UpperCAmelCase )
lowerCAmelCase__ : str = self.conv_out(__UpperCAmelCase )
lowerCAmelCase__ : Any = jnp.transpose(__UpperCAmelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=__UpperCAmelCase )
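# Hedged usage sketch (added): once initialized, a forward pass goes through
# Flax's functional `apply`, roughly
#   out = model.apply({"params": params}, sample, timesteps, encoder_hidden_states)
# and returns FlaxUNetaDConditionOutput(sample=...) unless return_dict=False;
# the variable names above are placeholders, not values from this file.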
| 678 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class _lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=64 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ):
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : Tuple = batch_size
lowerCAmelCase__ : Union[str, Any] = seq_length
lowerCAmelCase__ : str = is_training
lowerCAmelCase__ : Union[str, Any] = use_input_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : int = use_labels
lowerCAmelCase__ : List[Any] = vocab_size
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : List[str] = embedding_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : Optional[int] = num_attention_heads
lowerCAmelCase__ : List[str] = intermediate_size
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = max_position_embeddings
lowerCAmelCase__ : List[Any] = type_vocab_size
lowerCAmelCase__ : Optional[Any] = type_sequence_label_size
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : Optional[Any] = num_labels
lowerCAmelCase__ : List[str] = num_choices
lowerCAmelCase__ : Any = scope
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : str = None
if self.use_input_mask:
lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : Optional[Any] = None
if self.use_token_type_ids:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Optional[int] = None
if self.use_labels:
lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
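# (Added note, hedged.) prepare_config_and_inputs above fabricates random token
# ids, masks, and labels with ids_tensor/random_attention_mask so that every
# model head exercised below can run without any real data.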
def __magic_name__( self ):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = MegatronBertModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = MegatronBertForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = MegatronBertForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = MegatronBertForNextSentencePrediction(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = MegatronBertForPreTraining(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : int = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , next_sentence_label=__UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = MegatronBertForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.num_labels
lowerCAmelCase__ : Union[str, Any] = MegatronBertForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.num_labels
lowerCAmelCase__ : str = MegatronBertForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.num_choices
lowerCAmelCase__ : Dict = MegatronBertForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : Any = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = config_and_inputs
lowerCAmelCase__ : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _lowercase , _lowercase , unittest.TestCase ):
A__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
A__ = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ = True
# test_resize_embeddings = False
A__ = False
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
lowerCAmelCase__ : List[Any] = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class in get_values(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def __magic_name__( self ):
lowerCAmelCase__ : str = MegatronBertModelTester(self )
lowerCAmelCase__ : Dict = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def __magic_name__( self ):
self.config_tester.run_common_tests()
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__UpperCAmelCase )
def __lowerCAmelCase ( UpperCamelCase ) -> Optional[int]:
return torch.tensor(
UpperCamelCase , dtype=torch.long , device=UpperCamelCase , )
lowerCAmelCase_ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
def __magic_name__( self ):
lowerCAmelCase__ : int = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
lowerCAmelCase__ : Union[str, Any] = os.path.join(os.environ['''MYDIR'''] , __UpperCAmelCase )
lowerCAmelCase__ : Tuple = MegatronBertModel.from_pretrained(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.half()
lowerCAmelCase__ : Optional[int] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase )[0]
lowerCAmelCase__ : List[Any] = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , __UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
lowerCAmelCase__ : Union[str, Any] = output[0, ii, jj]
lowerCAmelCase__ : Optional[Any] = expected[3 * ii + jj]
lowerCAmelCase__ : List[str] = '''ii={} jj={} a={} b={}'''.format(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertTrue(math.isclose(__UpperCAmelCase , __UpperCAmelCase , rel_tol=__UpperCAmelCase , abs_tol=__UpperCAmelCase ) , msg=__UpperCAmelCase )
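# Index sketch (added note, not from the original test): `expected` above is a
# flat row-major 3x3 grid, so slice entry (ii, jj) is checked against
# expected[3 * ii + jj]; e.g. (ii=1, jj=2) maps to expected[5].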
| 678 | 1 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _lowerCAmelCase ( _lowercase ):
A__ = 'EncodecFeatureExtractor'
A__ = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Dict = self.feature_extractor
lowerCAmelCase__ : List[str] = False
def __magic_name__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ):
return self.tokenizer.get_decoder_prompt_ids(task=__UpperCAmelCase , language=__UpperCAmelCase , no_timestamps=__UpperCAmelCase )
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ : Dict = kwargs.pop('''audio''' , __UpperCAmelCase )
lowerCAmelCase__ : Any = kwargs.pop('''sampling_rate''' , __UpperCAmelCase )
lowerCAmelCase__ : int = kwargs.pop('''text''' , __UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
lowerCAmelCase__ : Any = args[0]
lowerCAmelCase__ : Dict = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
lowerCAmelCase__ : int = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase )
if audio is not None:
lowerCAmelCase__ : Union[str, Any] = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowerCAmelCase__ : Union[str, Any] = audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
lowerCAmelCase__ : Any = audio_inputs['''padding_mask''']
return inputs
def __magic_name__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = kwargs.pop('''audio''' , __UpperCAmelCase )
lowerCAmelCase__ : int = kwargs.pop('''padding_mask''' , __UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
lowerCAmelCase__ : Any = args[0]
lowerCAmelCase__ : str = args[1:]
if audio_values is not None:
return self._decode_audio(__UpperCAmelCase , padding_mask=__UpperCAmelCase )
else:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def __magic_name__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = None ):
lowerCAmelCase__ : Any = to_numpy(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = audio_values.shape
if padding_mask is None:
return list(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = to_numpy(__UpperCAmelCase )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
lowerCAmelCase__ : Optional[Any] = seq_len - padding_mask.shape[-1]
lowerCAmelCase__ : List[Any] = 1 - self.feature_extractor.padding_value
lowerCAmelCase__ : Tuple = np.pad(__UpperCAmelCase , ((0, 0), (0, difference)) , '''constant''' , constant_values=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = audio_values.tolist()
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ : List[str] = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
lowerCAmelCase__ : Dict = sliced_audio.reshape(__UpperCAmelCase , -1 )
return audio_values
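# --- Illustrative sketch (an addition, not part of the processor above) ---
# A minimal, self-contained restatement of the padding-mask trimming idea used
# in the decode path: positions whose mask equals the padding value are
# stripped per sequence. All names and values here are assumptions chosen for
# illustration; the real method also handles the channel dimension.
def _trim_padding_sketch(values, mask, padding_value=0):
    # values: (batch, seq_len) decoded audio; mask: (batch, seq_len) with
    # `padding_value` marking padded positions. Returns variable-length arrays.
    return [values[i][mask[i] != padding_value] for i in range(values.shape[0])]
# _trim_padding_sketch(np.array([[0.1, 0.2, 0.0, 0.0]]), np.array([[1, 1, 0, 0]]))
# -> [array([0.1, 0.2])]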
| 678 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'bart'
A__ = ['past_key_values']
A__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , __UpperCAmelCase=5_0265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
lowerCAmelCase__ : Union[str, Any] = vocab_size
lowerCAmelCase__ : Optional[Any] = max_position_embeddings
lowerCAmelCase__ : int = d_model
lowerCAmelCase__ : str = encoder_ffn_dim
lowerCAmelCase__ : Any = encoder_layers
lowerCAmelCase__ : Dict = encoder_attention_heads
lowerCAmelCase__ : Union[str, Any] = decoder_ffn_dim
lowerCAmelCase__ : Union[str, Any] = decoder_layers
lowerCAmelCase__ : Any = decoder_attention_heads
lowerCAmelCase__ : Tuple = dropout
lowerCAmelCase__ : Any = attention_dropout
lowerCAmelCase__ : Any = activation_dropout
lowerCAmelCase__ : Optional[Any] = activation_function
lowerCAmelCase__ : Union[str, Any] = init_std
lowerCAmelCase__ : Union[str, Any] = encoder_layerdrop
lowerCAmelCase__ : int = decoder_layerdrop
lowerCAmelCase__ : Optional[int] = classifier_dropout
lowerCAmelCase__ : str = use_cache
lowerCAmelCase__ : int = encoder_layers
lowerCAmelCase__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ):
lowerCAmelCase__ : str = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
'''The config can simply be saved and uploaded again to be fixed.''' )
class _lowerCAmelCase ( _lowercase ):
@property
def __magic_name__( self ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCAmelCase__ : Any = {0: '''batch'''}
lowerCAmelCase__ : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowerCAmelCase__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''}
lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCAmelCase__ : List[str] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_layers
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCAmelCase__ : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
lowerCAmelCase__ : List[str] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __magic_name__( self ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Dict = super().outputs
else:
lowerCAmelCase__ : Any = super(__UpperCAmelCase , self ).outputs
if self.use_past:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.num_layers
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
lowerCAmelCase__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Generate decoder inputs
lowerCAmelCase__ : int = seq_length if not self.use_past else 1
lowerCAmelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
lowerCAmelCase__ : str = dict(**__UpperCAmelCase , **__UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = common_inputs['''input_ids'''].shape
lowerCAmelCase__ : List[str] = common_inputs['''decoder_input_ids'''].shape[1]
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.num_attention_heads
lowerCAmelCase__ : Any = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase__ : List[Any] = decoder_seq_length + 3
lowerCAmelCase__ : Any = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCAmelCase__ : Any = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 )
lowerCAmelCase__ : List[str] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.num_layers
lowerCAmelCase__ : Union[str, Any] = min(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Any = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers
lowerCAmelCase__ : Dict = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__UpperCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
) )
# TODO: test this.
lowerCAmelCase__ : Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__UpperCAmelCase , __UpperCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) )
return common_inputs
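# Shape sketch (added note with assumed values): with batch=2, 16 decoder
# attention heads, d_model=1024 and decoder_seq_length=7, each decoder
# past_key_values tensor built above has shape
# (2, 16, 7 + 3, 1024 // 16) == (2, 16, 10, 64).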
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
lowerCAmelCase__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCAmelCase__ , lowerCAmelCase__ : Any = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowerCAmelCase__ : List[str] = seqlen + 2
lowerCAmelCase__ , lowerCAmelCase__ : int = self.num_layers
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_attention_heads
lowerCAmelCase__ : Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase__ : Optional[Any] = common_inputs['''attention_mask'''].dtype
lowerCAmelCase__ : List[Any] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
lowerCAmelCase__ : List[Any] = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase )
]
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase__ : Tuple = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase__ : Optional[Any] = tokenizer.num_special_tokens_to_add(__UpperCAmelCase )
lowerCAmelCase__ : int = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase__ : Optional[int] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCAmelCase__ : str = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) )
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
elif self.task == "causal-lm":
lowerCAmelCase__ : List[str] = self._generate_dummy_inputs_for_causal_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
else:
lowerCAmelCase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
return common_inputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase__ : Any = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
lowerCAmelCase__ : Dict = super(__UpperCAmelCase , self )._flatten_past_key_values_(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
| 678 | 1 |
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = """T5Config"""
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> jnp.ndarray:
lowerCAmelCase__ : Union[str, Any] = jnp.zeros_like(UpperCamelCase )
lowerCAmelCase__ : Dict = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
lowerCAmelCase__ : Optional[int] = shifted_input_ids.at[:, 0].set(UpperCamelCase )
lowerCAmelCase__ : Tuple = jnp.where(shifted_input_ids == -100 , UpperCamelCase , UpperCamelCase )
return shifted_input_ids
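# A self-contained sketch (an addition; names here are illustrative) restating
# the shift above with descriptive arguments, since the original uses
# positional placeholders:
def _shift_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = jnp.zeros_like(input_ids)
    shifted = shifted.at[:, 1:].set(input_ids[:, :-1])  # shift everything right by one
    shifted = shifted.at[:, 0].set(decoder_start_token_id)  # prepend the start token
    # -100 is the common "ignore" label sentinel; replace it with the pad id
    return jnp.where(shifted == -100, pad_token_id, shifted)
# Example: _shift_right_sketch(jnp.array([[5, 6, -100]]), 1, 0) -> [[0, 5, 6]]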
class _lowerCAmelCase ( _lowercase ):
A__ = 'mt5'
A__ = MTaConfig
class _lowerCAmelCase ( _lowercase ):
A__ = 'mt5'
A__ = MTaConfig
class _lowerCAmelCase ( _lowercase ):
A__ = 'mt5'
A__ = MTaConfig
| 678 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'sew-d'
def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase=2 , __UpperCAmelCase=512 , __UpperCAmelCase=256 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=("p2c", "c2p") , __UpperCAmelCase="layer_norm" , __UpperCAmelCase="gelu_python" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __UpperCAmelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : Optional[int] = feat_extract_norm
lowerCAmelCase__ : str = feat_extract_activation
lowerCAmelCase__ : int = list(__UpperCAmelCase )
lowerCAmelCase__ : int = list(__UpperCAmelCase )
lowerCAmelCase__ : Any = list(__UpperCAmelCase )
lowerCAmelCase__ : int = conv_bias
lowerCAmelCase__ : List[Any] = num_conv_pos_embeddings
lowerCAmelCase__ : Optional[int] = num_conv_pos_embedding_groups
lowerCAmelCase__ : int = len(self.conv_dim )
lowerCAmelCase__ : Union[str, Any] = num_hidden_layers
lowerCAmelCase__ : Any = intermediate_size
lowerCAmelCase__ : int = squeeze_factor
lowerCAmelCase__ : int = max_position_embeddings
lowerCAmelCase__ : Any = position_buckets
lowerCAmelCase__ : Optional[int] = share_att_key
lowerCAmelCase__ : Tuple = relative_attention
lowerCAmelCase__ : Optional[int] = norm_rel_ebd
lowerCAmelCase__ : Tuple = list(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = hidden_act
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : Optional[int] = hidden_dropout
lowerCAmelCase__ : Union[str, Any] = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : List[Any] = feat_proj_dropout
lowerCAmelCase__ : Any = final_dropout
lowerCAmelCase__ : Optional[int] = layer_norm_eps
lowerCAmelCase__ : List[str] = feature_layer_norm_eps
lowerCAmelCase__ : Tuple = initializer_range
lowerCAmelCase__ : Tuple = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. '''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '''
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride) """
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Tuple = apply_spec_augment
lowerCAmelCase__ : List[str] = mask_time_prob
lowerCAmelCase__ : int = mask_time_length
lowerCAmelCase__ : int = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : int = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
# ctc loss
lowerCAmelCase__ : Optional[Any] = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# sequence classification
lowerCAmelCase__ : Tuple = use_weighted_layer_sum
lowerCAmelCase__ : Dict = classifier_proj_size
@property
def __magic_name__( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
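# Worked example (added note): with the default conv_stride
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the product computed by the
# property above is 5 * 2 ** 6 == 320, i.e. one output frame per 320 samples.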
| 678 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCAmelCase_ = False
class _lowerCAmelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ : Tuple = '''A painting of a squirrel eating a burger '''
lowerCAmelCase__ : str = torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = pipe(
prompt=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = generator.manual_seed(0 )
lowerCAmelCase__ : Optional[Any] = pipe(
prompt=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = '''A painting of a squirrel eating a burger '''
lowerCAmelCase__ : Optional[Any] = torch.manual_seed(0 )
lowerCAmelCase__ : int = pipe(
prompt=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
lowerCAmelCase__ : int = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase__ : Optional[int] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 678 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( _lowercase , unittest.TestCase ):
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def __magic_name__( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ : Union[str, Any] = PegasusTokenizer(__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __magic_name__( self ):
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def __magic_name__( self , **__UpperCAmelCase ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase ):
return ("This is a test", "This is a test")
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = '''</s>'''
lowerCAmelCase__ : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(__UpperCAmelCase ) , 1103 )
def __magic_name__( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : int = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowerCAmelCase__ : Any = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
lowerCAmelCase__ : Dict = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCAmelCase__ : List[str] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowerCAmelCase__ : Tuple = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase__ : Tuple = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase__ : str = '''To ensure a smooth flow of bank resolutions.'''
lowerCAmelCase__ : int = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase__ : List[Any] = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = ['''This is going to be way too long.''' * 150, '''short example''']
lowerCAmelCase__ : List[str] = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
lowerCAmelCase__ : Optional[int] = self._large_tokenizer(
text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def __magic_name__( self ):
# fmt: off
lowerCAmelCase__ : Optional[int] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( _lowercase , unittest.TestCase ):
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def __magic_name__( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ : List[Any] = PegasusTokenizer(__UpperCAmelCase , offset=0 , mask_token_sent=__UpperCAmelCase , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __magic_name__( self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def __magic_name__( self , **__UpperCAmelCase ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase ):
return ("This is a test", "This is a test")
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : str = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowerCAmelCase__ : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
lowerCAmelCase__ : int = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = ['''This is going to be way too long.''' * 1000, '''short example''']
lowerCAmelCase__ : int = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
lowerCAmelCase__ : Tuple = self._large_tokenizer(
text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask.
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowerCAmelCase__ : Union[str, Any] = self._large_tokenizer(__UpperCAmelCase ).input_ids
self.assertListEqual(
__UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 678 | 1 |
def __lowerCAmelCase ( UpperCamelCase ) -> int:
lowerCAmelCase__ : str = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def __lowerCAmelCase ( UpperCamelCase ) -> int:
lowerCAmelCase__ : List[Any] = 0
while number > 0:
lowerCAmelCase__ : Optional[int] = number % 10
sum_of_digits += last_digit
lowerCAmelCase__ : Dict = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def __lowerCAmelCase ( UpperCamelCase = 100 ) -> int:
lowerCAmelCase__ : int = factorial(UpperCamelCase )
lowerCAmelCase__ : List[Any] = split_and_add(UpperCamelCase )
return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
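# Worked example (added note): 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27,
# so solution(10) returns 27; for the default n = 100 the digit sum is 648.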
| 678 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'donut-swin'
A__ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[3, 6, 12, 24] , __UpperCAmelCase=7 , __UpperCAmelCase=4.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = image_size
lowerCAmelCase__ : List[str] = patch_size
lowerCAmelCase__ : int = num_channels
lowerCAmelCase__ : Optional[Any] = embed_dim
lowerCAmelCase__ : int = depths
lowerCAmelCase__ : Dict = len(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = num_heads
lowerCAmelCase__ : Dict = window_size
lowerCAmelCase__ : str = mlp_ratio
lowerCAmelCase__ : Optional[int] = qkv_bias
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase__ : List[str] = drop_path_rate
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : List[str] = use_absolute_embeddings
lowerCAmelCase__ : Dict = layer_norm_eps
lowerCAmelCase__ : Any = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase__ : List[Any] = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
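# Worked example (added note): with the default embed_dim=96 and 4 stages
# (len(depths) == 4), the derived hidden_size above is int(96 * 2 ** 3) == 768.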
| 678 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'glpn'
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=[2, 2, 2, 2] , __UpperCAmelCase=[8, 4, 2, 1] , __UpperCAmelCase=[32, 64, 160, 256] , __UpperCAmelCase=[7, 3, 3, 3] , __UpperCAmelCase=[4, 2, 2, 2] , __UpperCAmelCase=[1, 2, 5, 8] , __UpperCAmelCase=[4, 4, 4, 4] , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=64 , __UpperCAmelCase=10 , __UpperCAmelCase=-1 , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Dict = num_channels
lowerCAmelCase__ : List[Any] = num_encoder_blocks
lowerCAmelCase__ : List[str] = depths
lowerCAmelCase__ : Dict = sr_ratios
lowerCAmelCase__ : List[str] = hidden_sizes
lowerCAmelCase__ : Optional[Any] = patch_sizes
lowerCAmelCase__ : List[str] = strides
lowerCAmelCase__ : int = mlp_ratios
lowerCAmelCase__ : List[Any] = num_attention_heads
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : Dict = drop_path_rate
lowerCAmelCase__ : int = layer_norm_eps
lowerCAmelCase__ : str = decoder_hidden_size
lowerCAmelCase__ : Dict = max_depth
lowerCAmelCase__ : List[Any] = head_in_index
| 678 |
lowerCAmelCase_ = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
lowerCAmelCase_ = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float:
lowerCAmelCase__ : List[str] = from_type.lower().strip('''s''' )
lowerCAmelCase__ : List[str] = to_type.lower().strip('''s''' )
lowerCAmelCase__ : str = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Any = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase )
if from_sanitized not in METRIC_CONVERSION:
lowerCAmelCase__ : Tuple = (
F"""Invalid 'from_type' value: {from_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}"""
)
raise ValueError(UpperCamelCase )
if to_sanitized not in METRIC_CONVERSION:
lowerCAmelCase__ : List[Any] = (
F"""Invalid 'to_type' value: {to_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}"""
)
raise ValueError(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[from_sanitized]
lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[to_sanitized]
lowerCAmelCase__ : int = 1
if from_exponent > to_exponent:
lowerCAmelCase__ : List[str] = from_exponent - to_exponent
else:
lowerCAmelCase__ : Dict = -(to_exponent - from_exponent)
return value * pow(10 , UpperCamelCase )
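# Worked example (added note): converting 4 kilometers to meters uses the
# exponents 3 (km) and 0 (m), so the function returns 4 * 10 ** (3 - 0) == 4000.0.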
if __name__ == "__main__":
from doctest import testmod
testmod()
| 678 | 1 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def __lowerCAmelCase ( UpperCamelCase ) -> str:
lowerCAmelCase__ : Union[str, Any] = split_dict._to_yaml_list()
assert len(UpperCamelCase ) == len(UpperCamelCase )
lowerCAmelCase__ : str = SplitDict._from_yaml_list(UpperCamelCase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowerCAmelCase__ : Optional[Any] = None
# the split name of split_dict takes over the name of the split info object
lowerCAmelCase__ : Union[str, Any] = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'''split_info''' , [SplitInfo(), SplitInfo(dataset_name=UpperCamelCase ), SplitInfo(dataset_name='''my_dataset''' )] )
def __lowerCAmelCase ( UpperCamelCase ) -> Dict:
# For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
# field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
lowerCAmelCase__ : Union[str, Any] = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 678 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def __magic_name__( *__UpperCAmelCase , **__UpperCAmelCase ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : int = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : List[str] = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__UpperCAmelCase ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
] , )
@require_tf
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )},
],
] , )
@slow
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCAmelCase__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
lowerCAmelCase__ : Tuple = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCAmelCase__ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : Union[str, Any] = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
lowerCAmelCase__ : Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
| 678 | 1 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowerCAmelCase_ = sys.version_info >= (3, 10)
def __lowerCAmelCase ( UpperCamelCase=None , UpperCamelCase=None ) -> Dict:
return field(default_factory=lambda: default , metadata=UpperCamelCase )
@dataclass
class _lowerCAmelCase :
A__ = 42
A__ = 42
A__ = 42
A__ = 42
@dataclass
class _lowerCAmelCase :
A__ = 42
A__ = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class _lowerCAmelCase :
A__ = False
A__ = True
A__ = None
class _lowerCAmelCase ( _lowercase ):
A__ = 'titi'
A__ = 'toto'
class _lowerCAmelCase ( _lowercase ):
A__ = 'titi'
A__ = 'toto'
A__ = 42
@dataclass
class _lowerCAmelCase :
A__ = "toto"
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = BasicEnum(self.foo )
@dataclass
class _lowerCAmelCase :
A__ = "toto"
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = MixedTypeEnum(self.foo )
@dataclass
class _lowerCAmelCase :
A__ = None
A__ = field(default=_lowercase , metadata={'help': 'help message'} )
A__ = None
A__ = list_field(default=[] )
A__ = list_field(default=[] )
@dataclass
class _lowerCAmelCase :
A__ = list_field(default=[] )
A__ = list_field(default=[1, 2, 3] )
A__ = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
A__ = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class _lowerCAmelCase :
A__ = field()
A__ = field()
A__ = field()
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = BasicEnum(self.required_enum )
@dataclass
class _lowerCAmelCase :
A__ = 42
A__ = field()
A__ = None
A__ = field(default='toto' , metadata={'help': 'help message'} )
A__ = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class _lowerCAmelCase :
A__ = False
A__ = True
A__ = None
@dataclass
class _lowerCAmelCase :
A__ = None
A__ = field(default=_lowercase , metadata={'help': 'help message'} )
A__ = None
A__ = list_field(default=[] )
A__ = list_field(default=[] )
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowerCAmelCase__ : int = {k: v for k, v in vars(__UpperCAmelCase ).items() if k != '''container'''}
lowerCAmelCase__ : Any = {k: v for k, v in vars(__UpperCAmelCase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , __UpperCAmelCase ) and yy.get('''choices''' , __UpperCAmelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](__UpperCAmelCase ) , yy['''type'''](__UpperCAmelCase ) )
del xx["type"], yy["type"]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Any = HfArgumentParser(__UpperCAmelCase )
lowerCAmelCase__ : Dict = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=__UpperCAmelCase , required=__UpperCAmelCase )
expected.add_argument('''--bar''' , type=__UpperCAmelCase , required=__UpperCAmelCase )
expected.add_argument('''--baz''' , type=__UpperCAmelCase , required=__UpperCAmelCase )
expected.add_argument('''--flag''' , type=__UpperCAmelCase , default=__UpperCAmelCase , const=__UpperCAmelCase , nargs='''?''' )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[str] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
(example,) = parser.parse_args_into_dataclasses(__UpperCAmelCase , look_for_args_file=__UpperCAmelCase )
self.assertFalse(example.flag )
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = HfArgumentParser(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=__UpperCAmelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCAmelCase , help='''help message''' )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=__UpperCAmelCase , default=__UpperCAmelCase , const=__UpperCAmelCase , nargs='''?''' )
expected.add_argument('''--baz''' , type=__UpperCAmelCase , default=__UpperCAmelCase , const=__UpperCAmelCase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counterpart
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=__UpperCAmelCase , dest='''baz''' )
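# Resulting CLI behaviour of the pair above, as a sketch: an empty argv keeps
# the dataclass default (baz=True), ``--baz`` parses to True, and ``--no_baz``
# stores False into the same ``baz`` destination.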
expected.add_argument('''--opt''' , type=__UpperCAmelCase , default=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__UpperCAmelCase )
for dataclass_type in dataclass_types:
lowerCAmelCase__ : Tuple = HfArgumentParser(__UpperCAmelCase )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : int = parser.parse_args([] )
self.assertEqual(__UpperCAmelCase , Namespace(foo=__UpperCAmelCase , baz=__UpperCAmelCase , opt=__UpperCAmelCase ) )
lowerCAmelCase__ : Union[str, Any] = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(__UpperCAmelCase , Namespace(foo=__UpperCAmelCase , baz=__UpperCAmelCase , opt=__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[int] = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(__UpperCAmelCase , Namespace(foo=__UpperCAmelCase , baz=__UpperCAmelCase , opt=__UpperCAmelCase ) )
lowerCAmelCase__ : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(__UpperCAmelCase , Namespace(foo=__UpperCAmelCase , baz=__UpperCAmelCase , opt=__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[int] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(__UpperCAmelCase , Namespace(foo=__UpperCAmelCase , baz=__UpperCAmelCase , opt=__UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : Dict = HfArgumentParser(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : int = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
lowerCAmelCase__ : Union[str, Any] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowerCAmelCase__ : Dict = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
lowerCAmelCase__ : Tuple = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowerCAmelCase__ : int = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
lowerCAmelCase__ : int = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __magic_name__( self ):
@dataclass
class _lowerCAmelCase :
A__ = "toto"
lowerCAmelCase__ : Union[str, Any] = HfArgumentParser(__UpperCAmelCase )
lowerCAmelCase__ : Any = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
lowerCAmelCase__ : Union[str, Any] = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
lowerCAmelCase__ : List[Any] = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = HfArgumentParser(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__UpperCAmelCase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=__UpperCAmelCase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCAmelCase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__UpperCAmelCase )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : str = parser.parse_args([] )
self.assertEqual(
__UpperCAmelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
lowerCAmelCase__ : Any = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(__UpperCAmelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def __magic_name__( self ):
lowerCAmelCase__ : int = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=__UpperCAmelCase , type=__UpperCAmelCase )
expected.add_argument('''--bar''' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='''help message''' )
expected.add_argument('''--baz''' , default=__UpperCAmelCase , type=__UpperCAmelCase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__UpperCAmelCase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__UpperCAmelCase )
lowerCAmelCase__ : Any = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__UpperCAmelCase )
for dataclass_type in dataclass_types:
lowerCAmelCase__ : Dict = HfArgumentParser(__UpperCAmelCase )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = parser.parse_args([] )
self.assertEqual(__UpperCAmelCase , Namespace(foo=__UpperCAmelCase , bar=__UpperCAmelCase , baz=__UpperCAmelCase , ces=[] , des=[] ) )
lowerCAmelCase__ : Dict = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(__UpperCAmelCase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = HfArgumentParser(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=__UpperCAmelCase , required=__UpperCAmelCase )
expected.add_argument('''--required_str''' , type=__UpperCAmelCase , required=__UpperCAmelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCAmelCase , )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = HfArgumentParser(__UpperCAmelCase )
lowerCAmelCase__ : Dict = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=__UpperCAmelCase , required=__UpperCAmelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCAmelCase , )
expected.add_argument('''--opt''' , type=__UpperCAmelCase , default=__UpperCAmelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCAmelCase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCAmelCase )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = HfArgumentParser(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
lowerCAmelCase__ : Tuple = parser.parse_dict(__UpperCAmelCase )[0]
lowerCAmelCase__ : Optional[Any] = BasicExample(**__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Any = HfArgumentParser(__UpperCAmelCase )
lowerCAmelCase__ : Any = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(__UpperCAmelCase , parser.parse_dict , __UpperCAmelCase , allow_extra_keys=__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Any = HfArgumentParser(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ : Union[str, Any] = os.path.join(__UpperCAmelCase , '''temp_json''' )
os.mkdir(__UpperCAmelCase )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
lowerCAmelCase__ : Tuple = BasicExample(**__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = HfArgumentParser(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ : List[Any] = os.path.join(__UpperCAmelCase , '''temp_yaml''' )
os.mkdir(__UpperCAmelCase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : str = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
lowerCAmelCase__ : Optional[int] = BasicExample(**__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = HfArgumentParser(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
| 678 |
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent) -> bool:
    # Breadth-first search: return True if the sink ``t`` is reachable from the
    # source ``s`` in the residual graph; ``parent`` records the path found.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink) -> list:
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record the original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # An edge that is now saturated (zero residual capacity) crosses the minimum cut.
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
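# Usage sketch: running this module prints the saturated (min-cut) edges of
# ``test_graph``; for the graph above the algorithm yields
# [(1, 3), (4, 3), (4, 5)] (listed here for illustration).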
if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
| 678 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
lowerCAmelCase__ : List[str] = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ : str = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
lowerCAmelCase__ : int = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_6000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
lowerCAmelCase__ : str = tempfile.mkdtemp()
lowerCAmelCase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase__ : Tuple = os.path.join(self.tmpdirname , __UpperCAmelCase )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
# load decoder from hub
lowerCAmelCase__ : Optional[Any] = '''hf-internal-testing/ngram-beam-search-decoder'''
def __magic_name__( self , **__UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__UpperCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __magic_name__( self , **__UpperCAmelCase ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __magic_name__( self , **__UpperCAmelCase ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__UpperCAmelCase )
def __magic_name__( self ):
shutil.rmtree(self.tmpdirname )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = self.get_tokenizer()
lowerCAmelCase__ : Dict = self.get_feature_extractor()
lowerCAmelCase__ : Union[str, Any] = self.get_decoder()
lowerCAmelCase__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __UpperCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : str = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that an error is thrown when the decoder alphabet doesn't match
lowerCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(__UpperCAmelCase , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=__UpperCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = self.get_feature_extractor()
lowerCAmelCase__ : str = self.get_tokenizer()
lowerCAmelCase__ : str = self.get_decoder()
lowerCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = floats_list((3, 1000) )
lowerCAmelCase__ : Tuple = feature_extractor(__UpperCAmelCase , return_tensors='''np''' )
lowerCAmelCase__ : Tuple = processor(__UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __magic_name__( self ):
lowerCAmelCase__ : str = self.get_feature_extractor()
lowerCAmelCase__ : Any = self.get_tokenizer()
lowerCAmelCase__ : str = self.get_decoder()
lowerCAmelCase__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = '''This is a test string'''
lowerCAmelCase__ : Optional[Any] = processor(text=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = tokenizer(__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __magic_name__( self , __UpperCAmelCase=(2, 10, 16) , __UpperCAmelCase=77 ):
np.random.seed(__UpperCAmelCase )
return np.random.rand(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Any = self.get_feature_extractor()
lowerCAmelCase__ : str = self.get_tokenizer()
lowerCAmelCase__ : int = self.get_decoder()
lowerCAmelCase__ : List[str] = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
lowerCAmelCase__ : Any = self._get_dummy_logits(shape=(10, 16) , seed=13 )
lowerCAmelCase__ : Any = processor.decode(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = decoder.decode_beams(__UpperCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def __magic_name__( self , __UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = self.get_feature_extractor()
lowerCAmelCase__ : str = self.get_tokenizer()
lowerCAmelCase__ : List[str] = self.get_decoder()
lowerCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
lowerCAmelCase__ : str = self._get_dummy_logits()
# note: the pool should be instantiated *after* Wav2Vec2ProcessorWithLM;
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic is used here so one parameterized test covers both pool=None and pool=Pool(...)
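# Intended ordering, sketched with hypothetical names:
#   processor = Wav2Vec2ProcessorWithLM.from_pretrained(...)  # LM loaded first
#   with get_context("fork").Pool() as pool:                  # workers fork afterwards
#       processor.batch_decode(logits, pool)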
if pool_context is None:
lowerCAmelCase__ : Optional[Any] = processor.batch_decode(__UpperCAmelCase )
else:
with get_context(__UpperCAmelCase ).Pool() as pool:
lowerCAmelCase__ : Dict = processor.batch_decode(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : int = list(__UpperCAmelCase )
with get_context('''fork''' ).Pool() as p:
lowerCAmelCase__ : List[Any] = decoder.decode_beams_batch(__UpperCAmelCase , __UpperCAmelCase )
texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__UpperCAmelCase , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(__UpperCAmelCase , decoded_processor.logit_score )
self.assertListEqual(__UpperCAmelCase , decoded_processor.lm_score )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.get_feature_extractor()
lowerCAmelCase__ : int = self.get_tokenizer()
lowerCAmelCase__ : List[Any] = self.get_decoder()
lowerCAmelCase__ : Any = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = self._get_dummy_logits()
lowerCAmelCase__ : Tuple = 15
lowerCAmelCase__ : str = -20.0
lowerCAmelCase__ : Tuple = -4.0
lowerCAmelCase__ : Dict = processor.batch_decode(
__UpperCAmelCase , beam_width=__UpperCAmelCase , beam_prune_logp=__UpperCAmelCase , token_min_logp=__UpperCAmelCase , )
lowerCAmelCase__ : List[Any] = decoded_processor_out.text
lowerCAmelCase__ : Dict = list(__UpperCAmelCase )
with get_context('''fork''' ).Pool() as pool:
lowerCAmelCase__ : List[Any] = decoder.decode_beams_batch(
__UpperCAmelCase , __UpperCAmelCase , beam_width=__UpperCAmelCase , beam_prune_logp=__UpperCAmelCase , token_min_logp=__UpperCAmelCase , )
lowerCAmelCase__ : Any = [d[0][0] for d in decoded_decoder_out]
lowerCAmelCase__ : str = [d[0][2] for d in decoded_decoder_out]
lowerCAmelCase__ : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __UpperCAmelCase )
self.assertTrue(np.array_equal(__UpperCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __UpperCAmelCase , atol=1e-3 ) )
self.assertTrue(np.array_equal(__UpperCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __UpperCAmelCase , atol=1e-3 ) )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.get_feature_extractor()
lowerCAmelCase__ : List[Any] = self.get_tokenizer()
lowerCAmelCase__ : Optional[int] = self.get_decoder()
lowerCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = self._get_dummy_logits()
lowerCAmelCase__ : Optional[Any] = 2.0
lowerCAmelCase__ : List[Any] = 5.0
lowerCAmelCase__ : Tuple = -20.0
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : int = processor.batch_decode(
__UpperCAmelCase , alpha=__UpperCAmelCase , beta=__UpperCAmelCase , unk_score_offset=__UpperCAmelCase , lm_score_boundary=__UpperCAmelCase , )
lowerCAmelCase__ : Any = decoded_processor_out.text
lowerCAmelCase__ : int = list(__UpperCAmelCase )
decoder.reset_params(
alpha=__UpperCAmelCase , beta=__UpperCAmelCase , unk_score_offset=__UpperCAmelCase , lm_score_boundary=__UpperCAmelCase , )
with get_context('''fork''' ).Pool() as pool:
lowerCAmelCase__ : Union[str, Any] = decoder.decode_beams_batch(
__UpperCAmelCase , __UpperCAmelCase , )
lowerCAmelCase__ : str = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __UpperCAmelCase )
lowerCAmelCase__ : int = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCAmelCase__ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
lowerCAmelCase__ : List[str] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCAmelCase__ : Optional[int] = os.listdir(__UpperCAmelCase )
lowerCAmelCase__ : Tuple = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = snapshot_download('''hf-internal-testing/processor_with_lm''' )
lowerCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
lowerCAmelCase__ : Optional[Any] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCAmelCase__ : List[Any] = os.listdir(__UpperCAmelCase )
lowerCAmelCase__ : Dict = os.listdir(__UpperCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that the decoder files from the hub and the local files in the cache are the same
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCAmelCase__ : List[str] = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCAmelCase__ : Union[str, Any] = floats_list((3, 1000) )
lowerCAmelCase__ : Optional[int] = processor_wavaveca(__UpperCAmelCase , return_tensors='''np''' )
lowerCAmelCase__ : int = processor_auto(__UpperCAmelCase , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
lowerCAmelCase__ : Union[str, Any] = self._get_dummy_logits()
lowerCAmelCase__ : List[str] = processor_wavaveca.batch_decode(__UpperCAmelCase )
lowerCAmelCase__ : Tuple = processor_auto.batch_decode(__UpperCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = self.get_feature_extractor()
lowerCAmelCase__ : str = self.get_tokenizer()
lowerCAmelCase__ : Any = self.get_decoder()
lowerCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Tuple = [d[key] for d in offsets]
return retrieved_list
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCAmelCase__ : Any = self._get_dummy_logits()[0]
lowerCAmelCase__ : Tuple = processor.decode(__UpperCAmelCase , output_word_offsets=__UpperCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def __magic_name__( self ):
lowerCAmelCase__ : int = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCAmelCase__ : Optional[int] = self._get_dummy_logits()
lowerCAmelCase__ : Dict = processor.batch_decode(__UpperCAmelCase , output_word_offsets=__UpperCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(__UpperCAmelCase , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __magic_name__( self ):
import torch
lowerCAmelCase__ : Tuple = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=__UpperCAmelCase )
lowerCAmelCase__ : Tuple = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_6000 ) )
lowerCAmelCase__ : Dict = iter(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = next(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
lowerCAmelCase__ : str = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowerCAmelCase__ : int = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase ).logits.cpu().numpy()
lowerCAmelCase__ : Optional[int] = processor.decode(logits[0] , output_word_offsets=__UpperCAmelCase )
lowerCAmelCase__ : int = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowerCAmelCase__ : Union[str, Any] = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
lowerCAmelCase__ : List[Any] = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(__UpperCAmelCase , '''word''' ) ) , __UpperCAmelCase )
self.assertEqual(''' '''.join(self.get_from_offsets(__UpperCAmelCase , '''word''' ) ) , output.text )
# output times
lowerCAmelCase__ : List[str] = torch.tensor(self.get_from_offsets(__UpperCAmelCase , '''start_time''' ) )
lowerCAmelCase__ : List[str] = torch.tensor(self.get_from_offsets(__UpperCAmelCase , '''end_time''' ) )
# fmt: off
lowerCAmelCase__ : Tuple = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
lowerCAmelCase__ : List[Any] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=0.01 ) )
| 678 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _lowerCAmelCase ( unittest.TestCase ):
A__ = MODEL_FOR_CAUSAL_LM_MAPPING
A__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
__UpperCAmelCase , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
] , )
lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id
lowerCAmelCase__ : List[Any] = '''<pad>'''
lowerCAmelCase__ : List[Any] = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , )
self.assertEqual(
__UpperCAmelCase , [
[
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
],
[
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
],
] , )
@require_tf
def __magic_name__( self ):
lowerCAmelCase__ : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Dict = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
return text_generator, ["This is a test", "Another test"]
def __magic_name__( self ):
lowerCAmelCase__ : Any = '''Hello I believe in'''
lowerCAmelCase__ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ : Optional[int] = text_generator(__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , stop_sequence=''' fe''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = text_generator.model
lowerCAmelCase__ : Optional[int] = text_generator.tokenizer
lowerCAmelCase__ : Tuple = text_generator('''This is a test''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase )
lowerCAmelCase__ : Dict = text_generator('''This is a test''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCAmelCase__ : List[str] = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
] , )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Any = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : str = text_generator('''test''' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# An empty prompt is slightly special:
# it requires a BOS token to exist.
# Special case for Pegasus, which always appends an EOS token, so it
# works even without a BOS token.
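# Sketch of why: for most causal-LM tokenizers, tokenizer("") produces no input
# ids, so generation needs a BOS token (or Pegasus' auto-appended EOS) to have
# a starting token at all.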
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCAmelCase__ : str = text_generator('''''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCAmelCase__ : List[str] = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, so these tests are skipped for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCAmelCase__ : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__UpperCAmelCase ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __magic_name__( self ):
import torch
# Classic `model_kwargs`
lowerCAmelCase__ : List[str] = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ : Any = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ : Union[str, Any] = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCAmelCase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowerCAmelCase__ : Any = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def __magic_name__( self ):
import torch
lowerCAmelCase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def __magic_name__( self ):
import torch
lowerCAmelCase__ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=__UpperCAmelCase , top_p=0.5 )
def __magic_name__( self ):
lowerCAmelCase__ : int = '''Hello world'''
lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
lowerCAmelCase__ : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' )
else:
lowerCAmelCase__ : Dict = logging.get_logger('''transformers.generation.utils''' )
lowerCAmelCase__ : Optional[Any] = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(__UpperCAmelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : Any = text_generator(__UpperCAmelCase , max_new_tokens=1 )
self.assertNotIn(__UpperCAmelCase , cl.out )
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : Union[str, Any] = text_generator(__UpperCAmelCase , max_length=10 )
self.assertNotIn(__UpperCAmelCase , cl.out )
| 678 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCAmelCase ( unittest.TestCase ):
@property
def __magic_name__( self ):
torch.manual_seed(0 )
lowerCAmelCase__ : str = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
@property
def __magic_name__( self ):
torch.manual_seed(0 )
lowerCAmelCase__ : List[str] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , )
return model
@property
def __magic_name__( self ):
torch.manual_seed(0 )
lowerCAmelCase__ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = self.dummy_uncond_unet
lowerCAmelCase__ : str = DDIMScheduler()
lowerCAmelCase__ : Union[str, Any] = self.dummy_vq_model
lowerCAmelCase__ : Tuple = LDMPipeline(unet=__UpperCAmelCase , vqvae=__UpperCAmelCase , scheduler=__UpperCAmelCase )
ldm.to(__UpperCAmelCase )
ldm.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = torch.manual_seed(0 )
lowerCAmelCase__ : List[str] = ldm(generator=__UpperCAmelCase , num_inference_steps=2 , output_type='''numpy''' ).images
lowerCAmelCase__ : str = torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = ldm(generator=__UpperCAmelCase , num_inference_steps=2 , output_type='''numpy''' , return_dict=__UpperCAmelCase )[0]
lowerCAmelCase__ : List[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Optional[Any] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
lowerCAmelCase__ : Dict = 1e-2 if torch_device != '''mps''' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' )
ldm.to(__UpperCAmelCase )
ldm.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
lowerCAmelCase__ : List[str] = ldm(generator=__UpperCAmelCase , num_inference_steps=5 , output_type='''numpy''' ).images
lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase__ : List[str] = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
lowerCAmelCase__ : int = 1e-2 if torch_device != '''mps''' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 678 |
def base16_encode(data: bytes) -> str:
    # Turn every byte into its two-digit, uppercase hexadecimal representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
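# Usage sketch:
#   >>> base16_encode(b"Hello World!")
#   '48656C6C6F20576F726C6421'
#   >>> base16_decode("48656C6C6F20576F726C6421")
#   b'Hello World!'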
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 678 | 1 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class _lowerCAmelCase ( _lowercase ):
def __init__( self ):
lowerCAmelCase__ : Optional[int] = []
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
self.events.append('''on_init_end''' )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
self.events.append('''on_train_begin''' )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
self.events.append('''on_train_end''' )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
self.events.append('''on_epoch_begin''' )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
self.events.append('''on_epoch_end''' )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
self.events.append('''on_step_begin''' )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
self.events.append('''on_step_end''' )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
self.events.append('''on_evaluate''' )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
self.events.append('''on_predict''' )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
self.events.append('''on_save''' )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
self.events.append('''on_log''' )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
self.events.append('''on_prediction_step''' )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = tempfile.mkdtemp()
def __magic_name__( self ):
shutil.rmtree(self.output_dir )
def __magic_name__( self , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=64 , __UpperCAmelCase=64 , __UpperCAmelCase=None , __UpperCAmelCase=False , **__UpperCAmelCase ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# it's set to False since the tests later on depend on its value.
lowerCAmelCase__ : Union[str, Any] = RegressionDataset(length=__UpperCAmelCase )
lowerCAmelCase__ : List[str] = RegressionDataset(length=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = RegressionModelConfig(a=__UpperCAmelCase , b=__UpperCAmelCase )
lowerCAmelCase__ : Any = RegressionPreTrainedModel(__UpperCAmelCase )
lowerCAmelCase__ : str = TrainingArguments(self.output_dir , disable_tqdm=__UpperCAmelCase , report_to=[] , **__UpperCAmelCase )
return Trainer(
__UpperCAmelCase , __UpperCAmelCase , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , callbacks=__UpperCAmelCase , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
# Order doesn't matter
lowerCAmelCase__ : Dict = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : cb.__name__ if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cb.__class__.__name__ )
lowerCAmelCase__ : Optional[Any] = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : cb.__name__ if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cb.__class__.__name__ )
for cba, cba in zip(__UpperCAmelCase , __UpperCAmelCase ):
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(__UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(__UpperCAmelCase , cba.__class__ )
elif not isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(__UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(cba.__class__ , __UpperCAmelCase )
else:
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase ):
lowerCAmelCase__ : int = ['''on_init_end''', '''on_train_begin''']
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : Union[str, Any] = len(trainer.get_eval_dataloader() )
lowerCAmelCase__ : int = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(__UpperCAmelCase ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
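# For a single-epoch run with no mid-epoch logging/eval/save, the stream built
# above looks like (sketch):
#   on_init_end, on_train_begin, on_epoch_begin,
#   (on_step_begin, on_step_end) * num_steps,
#   on_epoch_end, on_log, on_train_end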
def __magic_name__( self ):
lowerCAmelCase__ : Any = self.get_trainer()
lowerCAmelCase__ : Dict = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase )
# Callbacks passed at init are added to the default callbacks
lowerCAmelCase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(__UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase )
# TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
lowerCAmelCase__ : Tuple = self.get_trainer(disable_tqdm=__UpperCAmelCase )
lowerCAmelCase__ : str = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowerCAmelCase__ : Optional[Any] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(__UpperCAmelCase )
expected_callbacks.remove(__UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase )
lowerCAmelCase__ : Any = self.get_trainer()
lowerCAmelCase__ : List[str] = trainer.pop_callback(__UpperCAmelCase )
self.assertEqual(cb.__class__ , __UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase )
trainer.add_callback(__UpperCAmelCase )
expected_callbacks.insert(0 , __UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase )
# We can also add, pop, or remove by instance
lowerCAmelCase__ : Any = self.get_trainer()
lowerCAmelCase__ : Optional[int] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(__UpperCAmelCase )
expected_callbacks.remove(__UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase )
lowerCAmelCase__ : str = self.get_trainer()
lowerCAmelCase__ : List[str] = trainer.callback_handler.callbacks[0]
lowerCAmelCase__ : List[str] = trainer.pop_callback(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase )
trainer.add_callback(__UpperCAmelCase )
expected_callbacks.insert(0 , __UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase )
def __magic_name__( self ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='''ignore''' , category=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowerCAmelCase__ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__UpperCAmelCase , self.get_expected_events(__UpperCAmelCase ) )
# Independent log/save/eval
lowerCAmelCase__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowerCAmelCase__ : List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__UpperCAmelCase , self.get_expected_events(__UpperCAmelCase ) )
lowerCAmelCase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowerCAmelCase__ : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__UpperCAmelCase , self.get_expected_events(__UpperCAmelCase ) )
lowerCAmelCase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' )
trainer.train()
lowerCAmelCase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__UpperCAmelCase , self.get_expected_events(__UpperCAmelCase ) )
lowerCAmelCase__ : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' )
trainer.train()
lowerCAmelCase__ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__UpperCAmelCase , self.get_expected_events(__UpperCAmelCase ) )
# A bit of everything
lowerCAmelCase__ : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , )
trainer.train()
lowerCAmelCase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__UpperCAmelCase , self.get_expected_events(__UpperCAmelCase ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
lowerCAmelCase__ : Optional[Any] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(__UpperCAmelCase ) in warn_mock.call_args[0][0]
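# For context: MyTestTrainerCallback used above is defined earlier in the test
# module and is not part of this excerpt. A minimal sketch of what such an
# event-recording callback looks like, assuming only the public TrainerCallback
# hooks (the class name here is hypothetical, not from the original file):
from transformers import TrainerCallback


class EventRecorderCallback(TrainerCallback):
    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")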
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Expected values differ per device because of RNG/kernel differences
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
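# The test loops above mirror how the scheduler is driven outside the test
# harness. A minimal sketch, assuming torchsde is installed and using a
# stand-in for a real denoising model (the tensor shape and the fake "model"
# below are illustrative only, not from the original test):
import torch
from diffusers import DPMSolverSDEScheduler

sde_scheduler = DPMSolverSDEScheduler(
    num_train_timesteps=1100,
    beta_start=0.0001,
    beta_end=0.02,
    beta_schedule="linear",
    noise_sampler_seed=0,
)
sde_scheduler.set_timesteps(10)

latents = torch.randn(1, 3, 32, 32) * sde_scheduler.init_noise_sigma
for t in sde_scheduler.timesteps:
    scaled = sde_scheduler.scale_model_input(latents, t)
    model_output = 0.1 * scaled  # stand-in for a UNet forward pass
    latents = sde_scheduler.step(model_output, t, latents).prev_sample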
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
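# A minimal usage sketch for the special-token helpers above, assuming the
# google/bigbird-roberta-base checkpoint is reachable (the example strings
# are arbitrary):
from transformers import BigBirdTokenizerFast

tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")

ids_0 = tokenizer.encode("first segment", add_special_tokens=False)
ids_1 = tokenizer.encode("second segment", add_special_tokens=False)

# [CLS] ids_0 [SEP] ids_1 [SEP], as built by build_inputs_with_special_tokens
pair = tokenizer.build_inputs_with_special_tokens(ids_0, ids_1)
# 0s over the first segment (plus its [CLS]/[SEP]), 1s over the second
type_ids = tokenizer.create_token_type_ids_from_sequences(ids_0, ids_1)
assert len(pair) == len(type_ids)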
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
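# The same objects these tests exercise plug directly into generation via
# model.generate(..., stopping_criteria=...). A minimal sketch of a custom
# criterion, assuming the transformers version used here, where a criterion's
# __call__ returns a bool (the class name and stop_token_id are hypothetical):
import torch
from transformers import StoppingCriteria, StoppingCriteriaList


class StopOnTokenCriteria(StoppingCriteria):
    def __init__(self, stop_token_id: int):
        self.stop_token_id = stop_token_id

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # Stop once every sequence in the batch just emitted the stop token.
        return bool((input_ids[:, -1] == self.stop_token_id).all())


custom_criteria = StoppingCriteriaList([StopOnTokenCriteria(stop_token_id=2)])
# e.g. model.generate(input_ids, stopping_criteria=custom_criteria)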