from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Return True if the pandigital number passes all the divisibility tests."""
    # d2d3d4 must be divisible by 2
    if num[3] % 2 != 0:
        return False

    # d3d4d5 must be divisible by 3
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    # d4d5d6 must be divisible by 5
    if num[5] % 5 != 0:
        return False

    # d5d6d7, d6d7d8, d7d8d9, d8d9d10 must be divisible by 7, 11, 13, 17
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Return the sum of all 0-9 pandigital numbers which pass the divisibility tests."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
# ---------------------------------------------------------------------------
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , ):
__a = parent
__a = 13
__a = 7
__a = True
__a = True
__a = True
__a = 99
__a = 32
__a = 2
__a = 4
__a = 37
__a = '''gelu'''
__a = 0.1
__a = 0.1
__a = 512
__a = 16
__a = 2
__a = 0.02
__a = 3
__a = 4
__a = None
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ):
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = self.prepare_config_and_inputs()
__a = True
__a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a ):
__a = TFEsmModel(config=_a )
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__a = model(_a )
__a = [input_ids, input_mask]
__a = model(_a )
__a = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , ):
__a = True
__a = TFEsmModel(config=_a )
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''encoder_hidden_states''': encoder_hidden_states,
'''encoder_attention_mask''': encoder_attention_mask,
}
__a = model(_a )
__a = [input_ids, input_mask]
__a = model(_a , encoder_hidden_states=_a )
# Also check the case where encoder outputs are not passed
__a = model(_a , attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a ):
__a = TFEsmForMaskedLM(config=_a )
__a = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a ):
__a = self.num_labels
__a = TFEsmForTokenClassification(config=_a )
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__a = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = config_and_inputs
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
__UpperCAmelCase : Tuple = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Union[str, Any] = False
def __UpperCAmelCase ( self ):
__a = TFEsmModelTester(self )
__a = ConfigTester(self , config_class=_a , hidden_size=37 )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = TFEsmModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip('''Protein models do not support embedding resizing.''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip('''Protein models do not support embedding resizing.''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
__a = model.get_bias()
assert isinstance(_a , _a )
for k, v in name.items():
assert isinstance(_a , tf.Variable )
else:
__a = model.get_output_embeddings()
assert x is None
__a = model.get_bias()
assert name is None
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ):
__a = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
__a = tf.constant([[0, 1, 2, 3, 4, 5]] )
__a = model(_a )[0]
__a = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _a )
# compare the actual values for a slice.
__a = tf.constant(
[
[
[8.92_1518, -10.58_9814, -6.467_1307],
[-6.396_7156, -13.91_1377, -1.121_1915],
[-7.78_1247, -13.95_1557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def __UpperCAmelCase ( self ):
__a = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
__a = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
__a = model(_a )[0]
# compare the actual values for a slice.
__a = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
# ---------------------------------------------------------------------------
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help=(
                "Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only"
                " if you've reviewed the code, as it will execute on your local machine"
            ),
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
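# Usage sketch (hypothetical, assuming the surrounding transformers CLI wiring):
# the subcommand registered above corresponds to `transformers-cli download <model>`;
# the same effect can be reproduced programmatically with:
#
#     DownloadCommand("bert-base-uncased", None, False, False).run()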
# ---------------------------------------------------------------------------
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : int ) -> str:
if number > 0:
raise ValueError('''input must be a negative integer''' )
__a = len(bin(lowerCAmelCase__ )[3:] )
__a = bin(abs(lowerCAmelCase__ ) - (1 << binary_number_length) )[3:]
__a = (
(
'''1'''
+ '''0''' * (binary_number_length - len(lowerCAmelCase__ ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
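# Worked example (verified by hand): for -5, bin(-5)[3:] == "101" gives a width
# of 3 bits; abs(-5) - (1 << 3) == -3, whose binary digits are "11"; prefixing
# the sign bit and zero-padding yields "1011":
#
#     >>> twos_complement(-5)
#     '0b1011'
#     >>> twos_complement(0)
#     '0b0'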
# ---------------------------------------------------------------------------
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Copy/paste/tweak the fairseq model's weights to the transformers design."""
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")

    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
# ---------------------------------------------------------------------------
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : list[str] ) -> str:
__a = ''''''
for word_or_phrase in separated:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise Exception('''join() accepts only strings to be joined''' )
joined += word_or_phrase + separator
return joined.strip(lowerCAmelCase__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
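# Example behaviour (traced against the implementation above): the trailing
# separator appended inside the loop is stripped before returning.
#
#     >>> join("-", ["apple", "banana", "cherry"])
#     'apple-banana-cherry'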
# ---------------------------------------------------------------------------
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    """Compute and log the WER/CER metrics for the evaluation results."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Normalize the target transcription before scoring."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
# ---------------------------------------------------------------------------
"""simple docstring"""
lowercase_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowercase ( lowerCAmelCase__ : bytes ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(lowerCAmelCase__ )
__a = ''''''.join(bin(lowerCAmelCase__ )[2:].zfill(8 ) for byte in data )
__a = len(lowerCAmelCase__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
__a = b'''=''' * ((6 - len(lowerCAmelCase__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(lowerCAmelCase__ ) % 6)
else:
__a = b''''''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(lowerCAmelCase__ ) , 6 ) ).encode()
+ padding
)
def lowercase ( lowerCAmelCase__ : str ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = (
'''argument should be a bytes-like object or ASCII string, '''
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(lowerCAmelCase__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
try:
__a = encoded_data.decode('''utf-8''' )
except UnicodeDecodeError:
raise ValueError('''base64 encoded data should only contain ASCII characters''' )
__a = encoded_data.count('''=''' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(lowerCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__a = encoded_data[:-padding]
__a = ''''''.join(
bin(B64_CHARSET.index(lowerCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__a = ''''''.join(
bin(B64_CHARSET.index(lowerCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )
__a = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(lowerCAmelCase__ ) , 8 )
]
return bytes(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
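# Round-trip sketch: this codec implements standard Base64, so it should agree
# with the standard library on valid input (a simple self-check):
#
#     import base64
#     payload = b"Hello, World!"
#     assert base64_encode(payload) == base64.b64encode(payload)
#     assert base64_decode(base64_encode(payload)) == payload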
# ---------------------------------------------------------------------------
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
# ---------------------------------------------------------------------------
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = inspect.getfile(accelerate.test_utils )
__a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
__a = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
__a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def __UpperCAmelCase ( self ):
print(f'''Found {torch.cuda.device_count()} devices.''' )
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
@require_multi_gpu
def __UpperCAmelCase ( self ):
print(f'''Found {torch.cuda.device_count()} devices.''' )
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(f'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
@require_multi_gpu
def __UpperCAmelCase ( self ):
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
@require_multi_gpu
def __UpperCAmelCase ( self ):
print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
lowercase_ = Accelerator()
lowercase_ = (accelerator.state.process_index + 2, 1_0)
lowercase_ = torch.randint(0, 1_0, shape).to(accelerator.device)
lowercase_ = ""
lowercase_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
lowercase_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
lowercase_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
# ---------------------------------------------------------------------------
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
UpperCAmelCase = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE):
snake_case__ = 'albert'
def __init__( self : Tuple , __UpperCamelCase : Any=3_0000 , __UpperCamelCase : Dict=128 , __UpperCamelCase : int=4096 , __UpperCamelCase : Tuple=12 , __UpperCamelCase : int=1 , __UpperCamelCase : Union[str, Any]=64 , __UpperCamelCase : str=1_6384 , __UpperCamelCase : Optional[int]=1 , __UpperCamelCase : str="gelu_new" , __UpperCamelCase : Union[str, Any]=0 , __UpperCamelCase : List[Any]=0 , __UpperCamelCase : Any=512 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : str=0.0_2 , __UpperCamelCase : Optional[Any]=1E-12 , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : int="absolute" , __UpperCamelCase : int=0 , __UpperCamelCase : int=2 , __UpperCamelCase : Dict=3 , **__UpperCamelCase : Union[str, Any] , ) -> List[Any]:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_UpperCamelCase = vocab_size
_UpperCamelCase = embedding_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_hidden_groups
_UpperCamelCase = num_attention_heads
_UpperCamelCase = inner_group_num
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = classifier_dropout_prob
_UpperCamelCase = position_embedding_type
class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE):
@property
def _UpperCamelCase ( self : List[Any] ) -> int:
if self.task == "multiple-choice":
_UpperCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_UpperCamelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
# ---------------------------------------------------------------------------
"""simple docstring"""
import numpy as np
def lowercase ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float ) -> np.ndarray:
return np.where(vector > 0 , lowerCAmelCase__ , (alpha * (np.exp(lowerCAmelCase__ ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
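# Minimal usage sketch: positive inputs pass through unchanged, negative inputs
# are squashed towards -alpha (values below computed with numpy):
#
#     exponential_linear_unit(np.array([2.3, 0.6, -2, -3.8]), 0.3)
#     # -> array([ 2.3, 0.6, -0.25939942, -0.29328877])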
# ---------------------------------------------------------------------------
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
SCREAMING_SNAKE_CASE__:int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
SCREAMING_SNAKE_CASE__:int = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print("""\n""".join(upper_files) + """\n""")
SCREAMING_SNAKE_CASE__:Any = [file for file in filepaths if """ """ in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print("""\n""".join(space_files) + """\n""")
SCREAMING_SNAKE_CASE__:Tuple = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print("""\n""".join(hyphen_files) + """\n""")
SCREAMING_SNAKE_CASE__:Optional[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print("""\n""".join(nodir_files) + """\n""")
SCREAMING_SNAKE_CASE__:Dict = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
# ---------------------------------------------------------------------------
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
__a = set()
# Replace all the whitespace in our sentence
__a = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(lowerCAmelCase__ ) == 26
def lowercase ( lowerCAmelCase__ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
__a = [False] * 26
for char in input_str:
if char.islower():
__a = True
elif char.isupper():
__a = True
return all(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def lowercase ( ) -> None:
from timeit import timeit
__a = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=lowerCAmelCase__ ) )
print(timeit('''is_pangram_faster()''' , setup=lowerCAmelCase__ ) )
print(timeit('''is_pangram_fastest()''' , setup=lowerCAmelCase__ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
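# Quick agreement check (a sketch): the three variants should classify the same
# inputs identically, e.g.:
#
#     for s in ("The quick brown fox jumps over the lazy dog", "not a pangram"):
#         assert is_pangram(s) == is_pangram_faster(s) == is_pangram_fastest(s)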
# ---------------------------------------------------------------------------
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ):
__a = 42
class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self : Union[str, Any] , _lowerCamelCase : Union[str, Any] = 65536 , _lowerCamelCase : List[str] = None , _lowerCamelCase : str = 2 , _lowerCamelCase : Tuple = 2 , _lowerCamelCase : Optional[int] = 0 , _lowerCamelCase : Dict = "fourier" , _lowerCamelCase : Tuple = True , _lowerCamelCase : int = False , _lowerCamelCase : List[str] = 0.0 , _lowerCamelCase : List[Any] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _lowerCamelCase : str = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _lowerCamelCase : List[Any] = "UNetMidBlock1D" , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any] = (32, 32, 64) , _lowerCamelCase : Union[str, Any] = None , _lowerCamelCase : List[Any] = 8 , _lowerCamelCase : str = 1 , _lowerCamelCase : str = False , ):
super().__init__()
_snake_case = sample_size
# time
if time_embedding_type == "fourier":
_snake_case = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_a , log=_a , flip_sin_to_cos=_a )
_snake_case = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_snake_case = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_a , downscale_freq_shift=_a )
_snake_case = block_out_channels[0]
if use_timestep_embedding:
_snake_case = block_out_channels[0] * 4
_snake_case = TimestepEmbedding(
in_channels=_a , time_embed_dim=_a , act_fn=_a , out_dim=block_out_channels[0] , )
_snake_case = nn.ModuleList([] )
_snake_case = None
_snake_case = nn.ModuleList([] )
_snake_case = None
# down
_snake_case = in_channels
for i, down_block_type in enumerate(_a ):
_snake_case = output_channel
_snake_case = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_snake_case = i == len(_a ) - 1
_snake_case = get_down_block(
_a , num_layers=_a , in_channels=_a , out_channels=_a , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_a )
# mid
_snake_case = get_mid_block(
_a , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_a , add_downsample=_a , )
# up
_snake_case = list(reversed(_a ) )
_snake_case = reversed_block_out_channels[0]
if out_block_type is None:
_snake_case = out_channels
else:
_snake_case = block_out_channels[0]
for i, up_block_type in enumerate(_a ):
_snake_case = output_channel
_snake_case = (
reversed_block_out_channels[i + 1] if i < len(_a ) - 1 else final_upsample_channels
)
_snake_case = i == len(_a ) - 1
_snake_case = get_up_block(
_a , num_layers=_a , in_channels=_a , out_channels=_a , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_a )
_snake_case = output_channel
# out
_snake_case = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_snake_case = get_out_block(
out_block_type=_a , num_groups_out=_a , embed_dim=block_out_channels[0] , out_channels=_a , act_fn=_a , fc_dim=block_out_channels[-1] // 4 , )
def lowercase ( self : Dict , _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : List[str] = True , ):
_snake_case = timestep
if not torch.is_tensor(_a ):
_snake_case = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(_a ) and len(timesteps.shape ) == 0:
_snake_case = timesteps[None].to(sample.device )
_snake_case = self.time_proj(_a )
if self.config.use_timestep_embedding:
_snake_case = self.time_mlp(_a )
else:
_snake_case = timestep_embed[..., None]
_snake_case = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_snake_case = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_snake_case = ()
for downsample_block in self.down_blocks:
_snake_case , _snake_case = downsample_block(hidden_states=_a , temb=_a )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_snake_case = self.mid_block(_a , _a )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_snake_case = down_block_res_samples[-1:]
_snake_case = down_block_res_samples[:-1]
_snake_case = upsample_block(_a , res_hidden_states_tuple=_a , temb=_a )
# 5. post-process
if self.out_block:
_snake_case = self.out_block(_a , _a )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_a )
# ---------------------------------------------------------------------------
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=14 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_token_type_ids
__a = use_input_mask
__a = use_labels
__a = use_mc_token_ids
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
__a = self.vocab_size - 1
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
if self.use_mc_token_ids:
__a = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = self.get_config()
__a = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __UpperCAmelCase ( self ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , *_a ):
__a = CTRLModel(config=_a )
model.to(_a )
model.eval()
model(_a , token_type_ids=_a , head_mask=_a )
model(_a , token_type_ids=_a )
__a = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , *_a ):
__a = CTRLLMHeadModel(_a )
model.to(_a )
model.eval()
__a = model(_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = config_and_inputs
__a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def __UpperCAmelCase ( self , _a , _a , _a , _a , *_a ):
__a = self.num_labels
__a = CTRLForSequenceClassification(_a )
model.to(_a )
model.eval()
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = model(_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
__UpperCAmelCase : Union[str, Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
__UpperCAmelCase : Union[str, Any] = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : str = False
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def __UpperCAmelCase ( self ):
__a = CTRLModelTester(self )
__a = ConfigTester(self , config_class=_a , n_embd=37 )
def __UpperCAmelCase ( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_a )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __UpperCAmelCase ( self ):
pass
@slow
def __UpperCAmelCase ( self ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = CTRLModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def __UpperCAmelCase ( self ):
pass
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def __UpperCAmelCase ( self ):
__a = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(_a )
__a = torch.tensor(
[[11_859, 0, 1_611, 8]] , dtype=torch.long , device=_a ) # Legal the president is
__a = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__a = model.generate(_a , do_sample=_a )
self.assertListEqual(output_ids[0].tolist() , _a )
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---------------------------------------------------------------------------
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
# ---------------------------------------------------------------------------
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description='Train a masked language model on TPU.')
    parser.add_argument(
        '--pretrained_model_config', type=str, default='roberta-base',
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        '--tokenizer', type=str, default='unigram-tokenizer-wikitext',
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument('--per_replica_batch_size', type=int, default=8, help='Batch size per TPU core.')
    parser.add_argument(
        '--no_tpu', action='store_true',
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        '--tpu_name', type=str, default='local',
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
    )
    parser.add_argument(
        '--tpu_zone', type=str,
        help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.',
    )
    parser.add_argument('--gcp_project', type=str, help='Google cloud project name. Only used for non-Colab TPU nodes.')
    parser.add_argument(
        '--bfloat16', action='store_true',
        help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.',
    )
    parser.add_argument(
        '--train_dataset', type=str,
        help='Path to training dataset to load. If the path begins with `gs://`'
        ' then the dataset will be loaded from a Google Cloud Storage bucket.',
    )
    parser.add_argument('--shuffle_buffer_size', type=int, default=2**18, help='Size of the shuffle buffer (in samples)')
    parser.add_argument(
        '--eval_dataset', type=str,
        help='Path to evaluation dataset to load. If the path begins with `gs://`'
        ' then the dataset will be loaded from a Google Cloud Storage bucket.',
    )
    parser.add_argument('--num_epochs', type=int, default=1, help='Number of epochs to train for.')
    parser.add_argument('--learning_rate', type=float, default=1e-4, help='Learning rate to use for training.')
    parser.add_argument('--weight_decay_rate', type=float, default=1e-3, help='Weight decay rate to use for training.')
    parser.add_argument(
        '--max_length', type=int, default=512,
        help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py',
    )
    parser.add_argument('--mlm_probability', type=float, default=0.15, help='Fraction of tokens to mask during training.')
    parser.add_argument('--output_dir', type=str, required=True, help='Path to save model checkpoints to.')
    parser.add_argument('--hub_model_id', type=str, help='Model ID to upload to on the Hugging Face Hub.')
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split('/')[-1]
        sample_count = re.search(r'-\d+-(\d+)\.tfrecord', filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
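# For example, a shard named like "dataset-00000-524288.tfrecord" (a hypothetical name that
# matches the regex above) would contribute 524288 samples to the total.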
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device='/gpu:0')
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, '*.tfrecord'))
    if not training_records:
        raise ValueError(f'No .tfrecord files found in {args.train_dataset}.')
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, '*.tfrecord'))
    if not eval_records:
        raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.')
    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=['accuracy'])

    def decode_fn(example):
        features = {
            'input_ids': tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            'attention_mask': tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors='tf'
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch['attention_mask'], tf.bool)
            | (batch['input_ids'] == tokenizer.cls_token_id)
            | (batch['input_ids'] == tokenizer.sep_token_id)
        )
        batch['input_ids'], batch['labels'] = data_collator.tf_mask_tokens(
            batch['input_ids'],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )
    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )
    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )
    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
    main(args)
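# A hedged usage sketch from the command line. The script filename and all paths below are
# hypothetical; only the flags are the ones registered in parse_args() above:
#
#   python run_mlm_tpu.py \
#       --train_dataset gs://my-bucket/train/ \
#       --eval_dataset gs://my-bucket/eval/ \
#       --tokenizer unigram-tokenizer-wikitext \
#       --pretrained_model_config roberta-base \
#       --output_dir ./mlm-checkpoints \
#       --bfloat16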
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}')
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, 'w') as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['content']) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
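# Worked example (checked by hand): "def f(a, b)" and "def f(a, c)" tokenize to
# {def, f, a, b} and {def, f, a, c}, so jaccard_similarity returns 3 / 5 = 0.6.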
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['base_index']]['content']
        for element2 in extremes:
            code2 = _shared_dataset[element2['base_index']]['content']
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    extremes_fn = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(extremes_fn, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['base_index']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['is_extreme'] = element['base_index'] in extreme_dict
            if element["is_extreme"]:
                element['copies'] = extreme_dict[element['base_index']]['copies']
    print(f'Original dataset size: {len(dataset)}')
    print(f'Number of duplicate clusters: {len(duplicate_clusters)}')
    print(f'Files in duplicate cluster: {len(duplicate_indices)}')
    print(f'Unique files in duplicate cluster: {len(extreme_dict)}')
    print(f'Filtered dataset size: {len(ds_filter)}')
    return ds_filter, duplicate_clusters
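# A minimal usage sketch (hypothetical data, not from the original script; it assumes a
# `datasets.Dataset` with the "content", "repo_name" and "path" columns the functions above
# expect). Note that documents with fewer than MIN_NUM_TOKENS tokens get no MinHash and are
# therefore never clustered.
#
# from datasets import Dataset
# ds = Dataset.from_dict(
#     {
#         "content": [code_a, code_a, code_b],  # code_a/code_b: sufficiently long code strings
#         "repo_name": ["repo1", "repo2", "repo3"],
#         "path": ["a.py", "b.py", "c.py"],
#     }
# )
# ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)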
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
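# The `__init__` modules above all follow the same lazy-import pattern: populate
# `_import_structure`, import eagerly only under TYPE_CHECKING, and otherwise replace the module
# in `sys.modules` with a lazy proxy. A minimal self-contained sketch of the idea (hypothetical
# names, not the transformers implementation):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # Resolve the submodule that exports `attr` and import it on first access only.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")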
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}')
    return allocation_list
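# Worked example (checked by hand): allocation_num(16647, 4) partitions 16647 bytes into
# ['1-4161', '4162-8322', '8323-12483', '12484-16647'], with the remainder folded into the
# last range.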
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
lowercase_ = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            '`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
            'instead.',
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ['col_1', 'col_2'])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{'col_1': 1}, {'col_2': 'x'}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {'col_1': 1})
        self.assertDictEqual(dset[1], {'col_1': None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{'col_1': []}, {'col_1': [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features['col_1'], Sequence(Value('int64')))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
"""simple docstring"""
import math
def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
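# Worked example (checked by hand): for arr = [0, 1, 2, 8, 13, 17, 19, 32, 42] and x = 8,
# the block size is floor(sqrt(9)) = 3; the first loop stops at the block starting at index 3,
# the linear scan finds arr[3] == 8, and jump_search returns 3.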
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
lowercase_ = [int(item) for item in user_input.split(",")]
lowercase_ = int(input("Enter the number to be searched:\n"))
lowercase_ = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(F'''Number {x} is at index {res}''')
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )
        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None
        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )
        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    day_anchor = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is NOT a leap year if it isn't divisible by 4, or if it is a century year
    # not divisible by 400 (the original condition mishandled years like 2000).
    dooms_day = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (day - dooms_day + day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
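# Sanity checks for the corrected formula (dates verified against a calendar):
# >>> get_week_day(2020, 10, 24)
# 'Saturday'
# >>> get_week_day(2000, 1, 1)   # century leap year, exercises the year % 400 branch
# 'Saturday'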
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation('swish')
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation('silu')
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation('mish')
        self.assertIsInstance(act, nn.Mish)
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation('gelu')
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
"""simple docstring"""
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
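# Quick examples (worked by hand): is_arithmetic_series([2, 4, 6]) -> True,
# is_arithmetic_series([2, 4, 7]) -> False, and arithmetic_mean([2, 4, 6]) -> 4.0.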
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}'
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
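# How the three pattern forms above behave with IGNORE_KEYS_S2T (worked by hand):
#   should_ignore("text_encoder_prenet.embed.weight", IGNORE_KEYS_S2T)  -> True  (matches the "text_encoder_prenet.*" prefix)
#   should_ignore("encoder.proj.weight", IGNORE_KEYS_S2T)               -> True  (plain substring "encoder.proj")
#   should_ignore("encoder.layers.0.fc1.weight", IGNORE_KEYS_S2T)       -> False (no pattern matches)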
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f'Unsupported task: {task}')
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f'{name} was ignored')
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f'Unknown task name: {task}')
    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
IMPORT_ERROR_MESSAGE = (
    "\ntransformers can only be used from the command line to convert TensorFlow models to PyTorch; in that case, it"
    " requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation"
    " instructions.\n"
)
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f'Loading model {model_type}')
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )
            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )
            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )
            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )
            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
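# A hedged usage sketch from the command line (paths are hypothetical; the flags are the ones
# registered in register_subcommand above):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin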
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from PIL import Image
        >>> import torch
        >>> from diffusers import DiffusionPipeline
        >>> from diffusers.utils import export_to_gif, load_image

        >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        >>> repo = "openai/shap-e-img2img"
        >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
        >>> pipe = pipe.to(device)

        >>> guidance_scale = 3.0
        >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
        >>> image = load_image(image_url).convert("RGB")

        >>> images = pipe(
        ...     image,
        ...     guidance_scale=guidance_scale,
        ...     num_inference_steps=64,
        ...     frame_size=256,
        ... ).images

        >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
        ```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]
class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f'cuda:{gpu_id}')
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image,
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator=None,
        latents=None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: str = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}")
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler, )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds, ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents, ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128, )
            images.append(image)
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]
        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
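# A minimal, self-contained sketch of the classifier-free guidance update used in
# the denoising loop above, on toy tensors; the names `uncond`, `cond` and `scale`
# are illustrative, not part of the pipeline API.
import torch
uncond = torch.zeros(2, 3)  # prediction without image conditioning
cond = torch.ones(2, 3)     # prediction with image conditioning
scale = 3.0                 # guidance_scale > 1.0 strengthens the conditioning
guided = uncond + scale * (cond - uncond)
assert torch.allclose(guided, torch.full((2, 3), 3.0))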
| 256
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"
    def __init__(
        self,
        vocab_size=8_192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
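# A short usage sketch for the config above: instantiate with defaults and
# override one attribute; `model_type` comes from the class definition.
config = BeitConfig(image_size=384)
assert config.model_type == "beit" and config.image_size == 384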
| 45
| 0
|
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    # Anagrams share the same sorted-character signature.
    return "".join(sorted(word))
def anagram(my_word: str) -> list[str]:
    # Return every word in the list with the same signature as `my_word`.
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
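# A self-contained sketch of the signature-based grouping above on a tiny,
# hypothetical word list (no words.txt needed).
import collections
words = ["listen", "silent", "enlist", "google", "banana"]
groups = collections.defaultdict(list)
for w in words:
    groups["".join(sorted(w))].append(w)
assert groups["eilnst"] == ["listen", "silent", "enlist"]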
| 261
|
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
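# Worked example for the function above: the first four terms and their
# partial sum, computed directly.
terms = ["1", "1/2", "1/3", "1/4"]  # == harmonic_series("4")
partial_sum = sum(1 / n for n in range(1, 5))  # 1 + 1/2 + 1/3 + 1/4
assert abs(partial_sum - 25 / 12) < 1e-9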
| 45
| 0
|
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main() -> None:
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
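# A generic, minimal sketch of the subcommand pattern used above: each command
# registers itself on the subparsers object and sets a `func` factory that the
# dispatcher calls. The `EchoCommand` name is hypothetical.
from argparse import ArgumentParser
class EchoCommand:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("echo")
        sub.add_argument("text")
        sub.set_defaults(func=lambda args: print(args.text))
parser = ArgumentParser(prog="demo")
EchoCommand.register_subcommand(parser.add_subparsers())
args = parser.parse_args(["echo", "hello"])
args.func(args)  # prints "hello"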
| 288
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)
    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
    def flatten(self):
        from .features import Value
        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None
    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(self.languages)}).")
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}
    def flatten(self):
        from .features import Sequence, Value
        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
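# A plain-Python sketch of what `encode_example` does above: a dict with a
# multi-valued entry is flattened into parallel, language-sorted lists.
translation_dict = {"en": "the cat", "fr": ["le chat", "la chatte"]}
tuples = []
for lang, text in translation_dict.items():
    if isinstance(text, str):
        tuples.append((lang, text))
    else:
        tuples.extend((lang, el) for el in text)
languages, translations = zip(*sorted(tuples))
assert languages == ("en", "fr", "fr")
assert translations == ("the cat", "le chat", "la chatte")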
| 45
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
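# A rough, illustrative sketch of the crop_pct + center-crop geometry the
# processor above is configured with: resize so the crop covers `crop_pct` of
# the short side, then take a centered crop. Purely hypothetical arithmetic.
crop_edge, crop_pct = 30, 0.9
resize_short_side = int(crop_edge / crop_pct)  # 33 for these defaults
offset = (resize_short_side - crop_edge) // 2  # centered crop starts here
assert (resize_short_side, offset) == (33, 1)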
| 68
|
"""simple docstring"""
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revue
def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)
def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev), )
        max_rev[n] = max_revenue
        return max_rev[n]
def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]
def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)
def main() -> None:
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
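# A quick cross-check on a second input: the bottom-up table fills in O(n^2)
# while the naive recursion re-solves subproblems exponentially, but both agree.
prices = [1, 5, 8, 9]
assert bottom_up_cut_rod(4, prices) == naive_cut_rod_recursive(4, prices) == 10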
| 45
| 0
|
def decimal_isolate(number: float, digit_amount: int) -> float:
    # Isolate the decimal part of `number`, rounded to `digit_amount` digits when positive.
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
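# For comparison, a hedged alternative using the standard library: math.modf
# splits a float into fractional and integral parts with matching signs.
import math
frac, whole = math.modf(-14.789)
assert round(frac, 3) == -0.789 and whole == -14.0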
| 329
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
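# A generic sketch of the lazy-import idea behind `_LazyModule`, using the
# module-level __getattr__ hook from PEP 562; the names here are illustrative.
import importlib
_LAZY = {"UniSpeechConfig": ".configuration_unispeech"}
def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")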
| 45
| 0
|
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"""1/{temp + 1}""" if series else "1")
    return series
if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
| 145
|
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
    def test_silu(self):
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
    def test_mish(self):
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
    def test_gelu(self):
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
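# A small usage sketch for `get_activation` as exercised above (same import as
# the tests): SiLU vanishes at 0 and is near-identity for large inputs.
import torch
from diffusers.models.activations import get_activation
act = get_activation("silu")
out = act(torch.tensor([0.0, 20.0]))
assert out[0].item() == 0.0 and abs(out[1].item() - 20.0) < 1e-3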
| 45
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        # Compute the expected (height, width) after resizing with the `shortest_edge` rule.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4))
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
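# The aspect-preserving resize rule checked by `get_expected_values` above, as
# standalone arithmetic: the short side is pinned to `shortest_edge` and the
# long side scales proportionally.
shortest_edge = 18
w, h = 640, 480  # width > height
expected_height = shortest_edge
expected_width = int(shortest_edge * w / h)  # 24
assert (expected_height, expected_width) == (18, 24)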
| 312
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''encoder_hidden_states''': encoder_hidden_states,
            '''encoder_attention_mask''': encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFEsmModel,
            'fill-mask': TFEsmForMaskedLM,
            'text-classification': TFEsmForSequenceClassification,
            'token-classification': TFEsmForTokenClassification,
            'zero-shot': TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip('''Protein models do not support embedding resizing.''')
    def test_resize_token_embeddings(self):
        pass
    @unittest.skip('''Protein models do not support embedding resizing.''')
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))
    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''')
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 45
| 0
|
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = 'openai/whisper-base'
    description = (
        'This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '
        'transcribed text.'
    )
    name = 'transcriber'
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ['audio']
    outputs = ['text']
    def encode(self, audio):
        return self.pre_processor(audio, return_tensors='pt').input_features
    def forward(self, inputs):
        return self.model.generate(inputs=inputs)
    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
| 204
|
"""simple docstring"""
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError('''input must be a negative integer''')
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else '''0'''
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
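# Worked example for the function above: -5 in two's complement. bin(-5) is
# "-0b101", so binary_number_length == 3 and abs(-5) - (1 << 3) == -3, whose
# bits "11" are zero-padded and prefixed with "1" to give 0b1011.
assert twos_complement(-5) == "0b1011"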
| 45
| 0
|
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname: str) -> str:
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id
    def __len__(self):
        return len(self.file_names)
    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.")
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                }, step=overall_step, )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main() -> None:
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
    parser.add_argument(
        "--project_dir", type=str, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
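# The core Accelerate pattern from the script above, reduced to a hedged minimal
# sketch: wrap the model/optimizer/dataloader with `prepare` and replace
# `loss.backward()` with `accelerator.backward(loss)`. The toy model and data
# here are illustrative only (assumes `accelerate` and `torch` are installed).
import torch
from accelerate import Accelerator
accelerator = Accelerator(cpu=True)
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loader = torch.utils.data.DataLoader(torch.randn(8, 4), batch_size=4)
model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
for batch in loader:
    loss = model(batch).pow(2).mean()
    accelerator.backward(loss)
    optimizer.step()
    optimizer.zero_grad()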
| 300
|
"""simple docstring"""
def join(separator: str, separated: list) -> str:
    joined = ''''''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception('''join() accepts only strings to be joined''')
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
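# Usage check for the function above: the trailing separator added in the loop
# is stripped before returning.
assert join("-", ["a", "b", "c"]) == "a-b-c"
assert join(" ", ["You", "are", "amazing!"]) == "You are amazing!"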
| 45
| 0
|
'''simple docstring'''
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b'''Hello server!''')
    with open('''Received_file''', '''wb''') as out_file:
        print('''File opened''')
        print('''Receiving data...''')
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)
    print('''Successfully received the file''')
    sock.close()
    print('''Connection closed''')
if __name__ == "__main__":
main()
| 174
|
"""simple docstring"""
lowercase_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = ''''''.join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b'''=''' * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b''''''
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            '''argument should be a bytes-like object or ASCII string, '''
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode('''utf-8''')
        except UnicodeDecodeError:
            raise ValueError('''base64 encoded data should only contain ASCII characters''')
    padding = encoded_data.count('''=''')
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = ''''''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data)[: -padding * 2]
    else:
        binary_stream = ''''''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data)
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
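    # Hedged round-trip sanity check (not in the original):
    assert base64_decode(base64_encode(b"Hello, World!")) == b"Hello, World!"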
from numpy import exp, pi, sqrt
def a__ ( x , mu = 0.0 , sigma = 1.0 ):
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
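    # Hedged example (not in the original): the standard normal density at its
    # mean is 1 / sqrt(2 * pi), roughly 0.3989.
    print(a__(0.0))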
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = inspect.getfile(accelerate.test_utils )
__a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
__a = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
__a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def __UpperCAmelCase ( self ):
print(f'''Found {torch.cuda.device_count()} devices.''' )
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
@require_multi_gpu
def __UpperCAmelCase ( self ):
print(f'''Found {torch.cuda.device_count()} devices.''' )
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(f'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
@require_multi_gpu
def __UpperCAmelCase ( self ):
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
@require_multi_gpu
def __UpperCAmelCase ( self ):
print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 1_0)
    tensor = torch.randint(0, 1_0, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xglm"""] = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_xglm"""] = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_xglm"""] = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
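# Note (added): registering the _LazyModule in sys.modules means submodules such
# as modeling_xglm are only imported when one of their attributes is first used.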
"""simple docstring"""
import numpy as np
def lowercase ( vector : np.ndarray , alpha : float ) -> np.ndarray:
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
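    # Hedged example (not in the original): positive inputs pass through
    # unchanged, negative inputs saturate towards -alpha.
    print(lowercase(np.array([2.3, 0.6, -2.0, -3.8]), 0.3))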
"""simple docstring"""
from collections import deque
class Process :
    def __init__( self , process_name , arrival_time , burst_time ):
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ :
    def __init__( self , number_of_queues , time_slices , queue , current_time , ):
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()
    def calculate_sequence_of_finish_queue( self ):
        sequence = []
        for i in range(len(self.finish_queue ) ):
            sequence.append(self.finish_queue[i].process_name )
        return sequence
    def calculate_waiting_time( self , queue ):
        waiting_times = []
        for i in range(len(queue ) ):
            waiting_times.append(queue[i].waiting_time )
        return waiting_times
    def calculate_turnaround_time( self , queue ):
        turnaround_times = []
        for i in range(len(queue ) ):
            turnaround_times.append(queue[i].turnaround_time )
        return turnaround_times
    def calculate_completion_time( self , queue ):
        completion_times = []
        for i in range(len(queue ) ):
            completion_times.append(queue[i].stop_time )
        return completion_times
    def calculate_remaining_burst_time_of_processes( self , queue ):
        return [q.burst_time for q in queue]
    def update_waiting_time( self , process ):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served( self , ready_queue ):
        finished = deque()  # sequence deque of finished process
        while len(ready_queue ) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp )
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin( self , ready_queue , time_slice ):
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue ) ):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp )
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp )
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue( self ):
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1 ):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue , self.time_slices[i] )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue )
        return self.finish_queue
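# Rough walk-through (added note): with time slices [17, 25], a process gets at
# most 17 ticks in the first queue and 25 in the second; whatever burst time is
# left runs to completion under FCFS in the last queue.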
if __name__ == "__main__":
import doctest
    P1 = Process("""P1""", 0, 53)
    P2 = Process("""P2""", 0, 17)
    P3 = Process("""P3""", 0, 68)
    P4 = Process("""P4""", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
    doctest.testmod(extraglobs={"""queue""": deque([P1, P2, P3, P4])})
    P1 = Process("""P1""", 0, 53)
    P2 = Process("""P2""", 0, 17)
    P3 = Process("""P3""", 0, 68)
    P4 = Process("""P4""", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
__a = set()
# Replace all the whitespace in our sentence
__a = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(lowerCAmelCase__ ) == 26
def lowercase ( lowerCAmelCase__ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
__a = [False] * 26
for char in input_str:
if char.islower():
__a = True
elif char.isupper():
__a = True
return all(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def benchmark ( ) -> None:
    from timeit import timeit
    setup = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
    print(timeit('''is_pangram()''' , setup=setup ) )
    print(timeit('''is_pangram_faster()''' , setup=setup ) )
    print(timeit('''is_pangram_fastest()''' , setup=setup ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
"""simple docstring"""
from __future__ import annotations
import math
def is_prime ( number : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def list_truncated_nums ( n : int ) -> list[int]:
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
def validate ( n : int ) -> bool:
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True
def compute_truncated_primes ( count : int = 11 ) -> list[int]:
    list_truncated_primes = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def solution ( ) -> int:
    return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F"{sum(compute_truncated_primes(11)) = }")
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=14 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_token_type_ids
__a = use_input_mask
__a = use_labels
__a = use_mc_token_ids
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
__a = self.vocab_size - 1
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
if self.use_mc_token_ids:
__a = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = self.get_config()
__a = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __UpperCAmelCase ( self ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , *_a ):
__a = CTRLModel(config=_a )
model.to(_a )
model.eval()
model(_a , token_type_ids=_a , head_mask=_a )
model(_a , token_type_ids=_a )
__a = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , *_a ):
__a = CTRLLMHeadModel(_a )
model.to(_a )
model.eval()
__a = model(_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = config_and_inputs
__a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def __UpperCAmelCase ( self , _a , _a , _a , _a , *_a ):
__a = self.num_labels
__a = CTRLForSequenceClassification(_a )
model.to(_a )
model.eval()
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = model(_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
__UpperCAmelCase : Union[str, Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
__UpperCAmelCase : Union[str, Any] = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : str = False
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def __UpperCAmelCase ( self ):
__a = CTRLModelTester(self )
__a = ConfigTester(self , config_class=_a , n_embd=37 )
def __UpperCAmelCase ( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_a )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __UpperCAmelCase ( self ):
pass
@slow
def __UpperCAmelCase ( self ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = CTRLModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def __UpperCAmelCase ( self ):
pass
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def __UpperCAmelCase ( self ):
__a = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(_a )
__a = torch.tensor(
[[11_859, 0, 1_611, 8]] , dtype=torch.long , device=_a ) # Legal the president is
__a = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__a = model.generate(_a , do_sample=_a )
self.assertListEqual(output_ids[0].tolist() , _a )
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class a__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=18 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=None , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , ) -> Any:
'''simple docstring'''
A__ = size if size is not None else {"shortest_edge": 18}
A__ = crop_size if crop_size is not None else {"height": 18, "width": 18}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = image_size
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_center_crop
A__ = crop_size
A__ = do_normalize
A__ = image_mean
A__ = image_std
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class a__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = LevitImageProcessor if is_vision_available() else None
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = LevitImageProcessingTester(self )
@property
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , "image_mean" ) )
self.assertTrue(hasattr(_a , "image_std" ) )
self.assertTrue(hasattr(_a , "do_normalize" ) )
self.assertTrue(hasattr(_a , "do_resize" ) )
self.assertTrue(hasattr(_a , "do_center_crop" ) )
self.assertTrue(hasattr(_a , "size" ) )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
A__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(_a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(_a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(_a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
from __future__ import annotations
def lowerCAmelCase__ ( matrix: list[list[int]] ) -> int:
    '''simple docstring'''
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(matrix ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
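    # Hedged example (not in the original): for [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    # the cheapest monotone path costs 7.
    print(lowerCAmelCase__([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))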
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 1_0
NUM_PERM = 2_5_6
def get_min_hash ( tokens : List[str] ) -> Optional[MinHash]:
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens ( code : str ) -> Set[str]:
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex :
'''simple docstring'''
    def __init__( self , *,
        duplication_jaccard_threshold = 0.85 , ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add( self , code_key , min_hash ):
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(f'''Duplicate key {code_key}''' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self ):
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save( self , filepath ):
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , '''w''' ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash ( element ):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter ( dataset_iterator : Type[Dataset] ):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=10000 ) , chunksize=100 , ):
if data is not None:
yield data
def make_duplicate_clusters ( dataset_iterator : Type[Dataset] , jaccard_threshold : float ) -> List[List[Dict]]:
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def jaccard_similarity ( code1 : str , code2 : str ) -> float:
    tokens1 = get_tokens(code1 )
    tokens2 = get_tokens(code2 )
    return len(tokens1 & tokens2 ) / len(tokens1 | tokens2 )
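# Hedged example (added): jaccard_similarity("a b c", "b c d") == 2 / 4 == 0.5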
_shared_dataset = None
def _find_cluster_extremes_shared ( cluster , jaccard_threshold ):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['''base_index''']]['''content''']
        for element2 in extremes:
            code2 = _shared_dataset[element2['''base_index''']]['''content''']
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes ( cluster_list , dataset , jaccard_threshold ):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset ( dataset : Type[Dataset] , jaccard_threshold : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element['''base_index'''] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element['''base_index''']]['''copies''']
    print(f'''Original dataset size: {len(dataset )}''' )
    print(f'''Number of duplicate clusters: {len(duplicate_clusters )}''' )
    print(f'''Files in duplicate cluster: {len(duplicate_indices )}''' )
    print(f'''Unique files in duplicate cluster: {len(extreme_dict )}''' )
    print(f'''Filtered dataset size: {len(ds_filter )}''' )
    return ds_filter, duplicate_clusters
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class A__ :
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase__ : str , ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : List[str] = 1_3
_UpperCAmelCase : Tuple = 7
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : str = True
_UpperCAmelCase : Union[str, Any] = 9_9
_UpperCAmelCase : Dict = 3_2
_UpperCAmelCase : Optional[Any] = 2
_UpperCAmelCase : Tuple = 4
_UpperCAmelCase : str = 3_7
_UpperCAmelCase : Union[str, Any] = "gelu"
_UpperCAmelCase : Tuple = 0.1
_UpperCAmelCase : List[str] = 0.1
_UpperCAmelCase : Optional[int] = 5_1_2
_UpperCAmelCase : List[Any] = 1_6
_UpperCAmelCase : Tuple = 2
_UpperCAmelCase : Dict = 0.02
_UpperCAmelCase : List[Any] = 3
_UpperCAmelCase : Union[str, Any] = 4
_UpperCAmelCase : str = None
def _lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
_UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Optional[Any] = None
if self.use_input_mask:
_UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : Dict = None
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : int = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) : int = self.prepare_config_and_inputs()
_UpperCAmelCase : Optional[Any] = True
_UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict ) -> int:
"""simple docstring"""
_UpperCAmelCase : int = TFEsmModel(config=_a )
_UpperCAmelCase : Tuple = {"input_ids": input_ids, "attention_mask": input_mask}
_UpperCAmelCase : Optional[int] = model(_a )
_UpperCAmelCase : Union[str, Any] = [input_ids, input_mask]
_UpperCAmelCase : Union[str, Any] = model(_a )
_UpperCAmelCase : Dict = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = True
_UpperCAmelCase : Tuple = TFEsmModel(config=_a )
_UpperCAmelCase : str = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
_UpperCAmelCase : int = model(_a )
_UpperCAmelCase : List[str] = [input_ids, input_mask]
_UpperCAmelCase : str = model(_a , encoder_hidden_states=_a )
# Also check the case where encoder outputs are not passed
_UpperCAmelCase : Optional[int] = model(_a , attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = TFEsmForMaskedLM(config=_a )
_UpperCAmelCase : List[Any] = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict ) -> str:
"""simple docstring"""
_UpperCAmelCase : int = self.num_labels
_UpperCAmelCase : Optional[Any] = TFEsmForTokenClassification(config=_a )
_UpperCAmelCase : str = {"input_ids": input_ids, "attention_mask": input_mask}
_UpperCAmelCase : Union[str, Any] = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) : int = config_and_inputs
_UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : int = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase_ : Tuple = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : Union[str, Any] = False
def _lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Any = TFEsmModelTester(self )
_UpperCAmelCase : List[Any] = ConfigTester(self , config_class=_a , hidden_size=3_7 )
def _lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_a )
def _lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def _lowerCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Optional[Any] = TFEsmModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip("Protein models do not support embedding resizing." )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("Protein models do not support embedding resizing." )
def _lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
def _lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Optional[int] = model_class(_a )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
_UpperCAmelCase : List[Any] = model.get_bias()
assert isinstance(_a , _a )
for k, v in name.items():
assert isinstance(_a , tf.Variable )
else:
_UpperCAmelCase : int = model.get_output_embeddings()
assert x is None
_UpperCAmelCase : int = model.get_bias()
assert name is None
@require_tf
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Dict = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
_UpperCAmelCase : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase : List[Any] = model(_a )[0]
_UpperCAmelCase : List[Any] = [1, 6, 3_3]
self.assertEqual(list(output.numpy().shape ) , _a )
# compare the actual values for a slice.
_UpperCAmelCase : Union[str, Any] = tf.constant(
[
[
[8.92_1518, -10.58_9814, -6.467_1307],
[-6.396_7156, -13.91_1377, -1.121_1915],
[-7.78_1247, -13.95_1557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def _lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
_UpperCAmelCase : int = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
_UpperCAmelCase : Any = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
_UpperCAmelCase : Optional[Any] = model(_a )[0]
# compare the actual values for a slice.
_UpperCAmelCase : int = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = CycleDiffusionPipeline
_lowerCamelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
_lowerCamelCase : int = PipelineTesterMixin.required_optional_params - {'latents'}
_lowerCamelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
_lowerCamelCase : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __A ( self : int ):
torch.manual_seed(0 )
A_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
A_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , num_train_timesteps=1000 , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
A_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ = CLIPTextModel(_a )
A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
A_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __A ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int]=0 ):
A_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
A_ = image / 2 + 0.5
if str(_a ).startswith("mps" ):
A_ = torch.manual_seed(_a )
else:
A_ = torch.Generator(device=_a ).manual_seed(_a )
A_ = {
"prompt": "An astronaut riding an elephant",
"source_prompt": "An astronaut riding a horse",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"eta": 0.1,
"strength": 0.8,
"guidance_scale": 3,
"source_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def __A ( self : Any ):
A_ = "cpu" # ensure determinism for the device-dependent torch.Generator
A_ = self.get_dummy_components()
A_ = CycleDiffusionPipeline(**_a )
A_ = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ = self.get_dummy_inputs(_a )
A_ = pipe(**_a )
A_ = output.images
A_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def __A ( self : List[Any] ):
A_ = self.get_dummy_components()
for name, module in components.items():
if hasattr(_a , "half" ):
A_ = module.half()
A_ = CycleDiffusionPipeline(**_a )
A_ = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ = self.get_dummy_inputs(_a )
A_ = pipe(**_a )
A_ = output.images
A_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __A ( self : str ):
return super().test_save_load_local()
@unittest.skip("non-deterministic pipeline" )
def __A ( self : Any ):
return super().test_inference_batch_single_identical()
@skip_mps
def __A ( self : Tuple ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __A ( self : List[str] ):
return super().test_save_load_optional_components()
@skip_mps
def __A ( self : Optional[Any] ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy")
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy")
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", )
        image = output.images
        assert np.abs(image - expected_image).max() < 2e-2
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
lowercase_ = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
'''simple docstring'''
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
            '''instead.''', FutureWarning, )
        super().__init__(args=args, **kwargs)
def is_arithmetic_series(series: list):
    '''simple docstring'''
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list):
    '''simple docstring'''
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
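# Illustrative usage (an added sketch; the function names above restore the
# originals in place of the mangled placeholders):
#
#   >>> is_arithmetic_series([2, 4, 6])
#   True
#   >>> is_arithmetic_series([2, 4, 7])
#   False
#   >>> arithmetic_mean([2, 4, 6])
#   4.0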
"""simple docstring"""
import math
def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
lowercase_ = [int(item) for item in user_input.split(",")]
lowercase_ = int(input("Enter the number to be searched:\n"))
lowercase_ = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(F'''Number {x} is at index {res}''')
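# Illustrative usage (an added sketch): jump search probes a sorted list in
# blocks of ~sqrt(n) elements, then scans linearly inside the block, giving
# O(sqrt(n)) comparisons overall.
#
#   >>> jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55], 55)
#   10
#   >>> jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55], 4)
#   -1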
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_token_type_ids=True , use_input_mask=True , use_labels=True , use_mc_token_ids=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config( self ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def create_and_check_ctrl_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = CTRLModel(config=config )
        model.to(torch_device )
        model.eval()
        model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
    def create_and_check_lm_head_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = CTRLLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict
    def create_and_check_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = CTRLForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class CTRLModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': CTRLModel,
            'text-classification': CTRLForSequenceClassification,
            'text-generation': CTRLLMHeadModel,
            'zero-shot': CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=37 )
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_ctrl_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs )
    def test_ctrl_lm_head_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_is_small( self ):
        '''simple docstring'''
        pass
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip("The model doesn't support left padding" )  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility( self ):
        '''simple docstring'''
        pass
@require_torch
class CTRLModelLanguageGenerationTest ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    @slow
    def test_lm_generate_ctrl( self ):
        '''simple docstring'''
        model = CTRLLMHeadModel.from_pretrained("ctrl" )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[11_859, 0, 1_611, 8]] , dtype=torch.long , device=torch_device )  # Legal the president is
        expected_output_ids = [
            11_859,
            0,
            1_611,
            8,
            5,
            150,
            26_449,
            2,
            19,
            348,
            469,
            3,
            2_595,
            48,
            20_740,
            246_533,
            246_533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
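# Illustrative usage (an added sketch): the doomsday rule maps a date to its
# weekday via the century anchor and the per-month doomsday dates.
#
#   >>> get_week_day(2020, 10, 24)
#   'Saturday'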
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_UpperCAmelCase : Optional[int] = """\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"""
_UpperCAmelCase : Tuple = """\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"""
_UpperCAmelCase : Optional[Any] = """\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
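# Minimal usage sketch for the helpers above (added example; values chosen
# purely for illustration):
#
#   >>> import numpy as np
#   >>> simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0]))
#   0.6666666666666666
#   >>> acc_and_f1(np.array([0, 1, 1]), np.array([0, 1, 1]))
#   {'accuracy': 1.0, 'f1': 1.0}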
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue( datasets.Metric ):
"""simple docstring"""
    def _info(self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self , predictions , references ):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions , references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
"""simple docstring"""
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''')
    if len(series) == 0:
        raise ValueError('''Input list must be a non empty list''')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''')
    if len(series) == 0:
        raise ValueError('''Input list must be a non empty list''')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
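# Illustrative usage (an added sketch; this file repeats the arithmetic-series
# helpers above with type hints):
#
#   >>> is_arithmetic_series([3, 6, 9, 12])
#   True
#   >>> arithmetic_mean([3, 6, 9, 12])
#   7.5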
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowercase_ = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig ( folder_based_builder.FolderBasedBuilderConfig ):
    """simple docstring"""
    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder ( folder_based_builder.FolderBasedBuilder ):
    """simple docstring"""
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = 'audio'
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='''audio''' , label_column='''label''' )
AUDIO_EXTENSIONS = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
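# Hedged usage sketch (added): AudioFolder is consumed through the generic
# `datasets` loader; the data_dir below is a placeholder path.
#
#   from datasets import load_dataset
#   dataset = load_dataset("audiofolder", data_dir="/path/to/audio/folder")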
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand( BaseTransformersCLICommand ):
    '''simple docstring'''
@staticmethod
    def register_subcommand( parser ):
        train_parser = parser.add_parser(
            '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
        train_parser.add_argument('''--model_type''' , type=str , required=True , help='''Model's type.''' )
        train_parser.add_argument(
            '''--tf_checkpoint''' , type=str , required=True , help='''TensorFlow checkpoint path or folder.''' )
        train_parser.add_argument(
            '''--pytorch_dump_output''' , type=str , required=True , help='''Path to the PyTorch saved model output.''' )
        train_parser.add_argument('''--config''' , type=str , default='''''' , help='''Configuration file path or folder.''' )
        train_parser.add_argument(
            '''--finetuning_task_name''' , type=str , default=None , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , model_type , tf_checkpoint , pytorch_dump_output , config , finetuning_task_name , *args , ):
        self._logger = logging.get_logger('''transformers-cli/converting''' )
        self._logger.info(f'''Loading model {model_type}''' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self ):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_a )
if "ckpt" in self._tf_checkpoint.lower():
__a = self._tf_checkpoint
__a = ''''''
else:
__a = self._tf_checkpoint
__a = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
_a , self._config , self._pytorch_dump_output , _a )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_a )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
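# Hedged usage sketch (added) of the resulting CLI subcommand; checkpoint and
# config paths are placeholders:
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin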
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
def get_config(model_name: str) -> BitConfig:
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = '''std_conv''' if '''bit''' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1000 , id2label=id2label , label2id=label2id , )
    return config
def rename_key(name: str) -> str:
    if "stem.conv" in name:
        name = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "head.fc" in name:
        name = name.replace('''head.fc''' , '''classifier.1''' )
    if name.startswith('''norm''' ):
        name = '''bit.''' + name
    if "bit" not in name and "classifier" not in name:
        name = '''bit.encoder.''' + name
    return name
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if '''head''' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print('''Logits:''' , logits[0, :3] )
    print('''Predicted class:''' , model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'''Pushing model {model_name} and processor to the hub''' )
        model.push_to_hub(F'''ybelkada/{model_name}''' )
        processor.push_to_hub(F'''ybelkada/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
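# Hedged usage sketch (added), assuming this script is saved as
# convert_bit_to_pytorch.py (the file name is an assumption):
#
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 --push_to_hub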
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'beit'
    def __init__( self , vocab_size=8_192 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('1.11' )
@property
def __UpperCAmelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __UpperCAmelCase ( self ):
return 1E-4
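# Minimal instantiation sketch (added example; the defaults above mirror the
# base BEiT checkpoint):
#
#   config = BeitConfig(image_size=384, use_relative_position_bias=True)
#   assert config.num_hidden_layers == 12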
"""simple docstring"""
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
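# Illustrative usage (an added sketch): the conversion only shifts the decimal
# exponent between the two metric prefixes.
#
#   >>> length_conversion(4, "meter", "kilometer")
#   0.004
#   >>> length_conversion(1, "gigametre", "megametre")
#   1000.0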
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term)):
        series.append(f'''1/{temp + 1}''' if series else '''1''')
    return series
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
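# Illustrative usage (an added sketch):
#
#   >>> harmonic_series("5")
#   ['1', '1/2', '1/3', '1/4', '1/5']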
"""simple docstring"""
def solution(numerator: int = 1, digit: int = 10_00):
    '''simple docstring'''
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
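# Illustrative usage (an added sketch): find the denominator d <= digit whose
# decimal expansion of numerator/d has the longest recurring cycle
# (Project Euler 26 style).
#
#   >>> solution(1, 10)
#   7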
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=12 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , summary_type="last" , use_proj=None , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 ).float()
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config( self ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
    def create_and_check_flaubert_model( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = FlaubertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , lengths=input_lengths , langs=token_type_ids )
        result = model(input_ids , langs=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_flaubert_lm_head( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = FlaubertWithLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_flaubert_simple_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = FlaubertForQuestionAnsweringSimple(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids )
        result = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_flaubert_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = FlaubertForQuestionAnswering(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids )
        result_with_labels = model(
            input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , p_mask=input_mask , )
        result_with_labels = model(
            input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , )
        (total_loss , ) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
        (total_loss , ) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
    def create_and_check_flaubert_sequence_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = FlaubertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids )
        result = model(input_ids , labels=sequence_labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_flaubert_token_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = FlaubertForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_flaubert_multiple_choice( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = FlaubertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': FlaubertModel,
            'fill-mask': FlaubertWithLMHeadModel,
            'question-answering': FlaubertForQuestionAnsweringSimple,
            'text-classification': FlaubertForSequenceClassification,
            'token-classification': FlaubertForTokenClassification,
            'zero-shot': FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = FlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_flaubert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )
    def test_flaubert_lm_head( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )
    def test_flaubert_simple_qa( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs )
    def test_flaubert_qa( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )
    def test_flaubert_sequence_classif( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )
    def test_flaubert_token_classif( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs )
    def test_flaubert_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @slow
    @require_torch_gpu
    def test_torchscript_device_change( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , "traced_model.pt" ) )
                loaded = torch.jit.load(os.path.join(tmp , "traced_model.pt" ) , map_location=torch_device )
                loaded(inputs_dict["input_ids"].to(torch_device ) , inputs_dict["attention_mask"].to(torch_device ) )
@require_torch
class FlaubertModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding( self ):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    '''simple docstring'''
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="""subcommands""", dest="""subcommand""")
    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])
    return config_parser
def main():
    '''simple docstring'''
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, """func"""):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
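# Hedged usage sketch (added) of the resulting CLI; subcommand names follow
# the parsers wired above:
#
#   accelerate config            # interactive questionnaire
#   accelerate config default    # write a default config file
#   accelerate config update     # migrate an existing config file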
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ = "▁"
SCREAMING_SNAKE_CASE__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = """<s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<unk>""" )
        self.assertEqual(vocab_keys[1] , """<s>""" )
        self.assertEqual(vocab_keys[-1] , """[MASK]""" )
        self.assertEqual(len(vocab_keys ) , 1_004 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_full_tokenizer( self ):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer( self ):
        return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
    @slow
    def test_tokenization_base_easy_symbols( self ):
        symbols = """Hello World!"""
        original_tokenizer_encodings = [65, 18_536, 2_260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenization_base_hard_symbols( self ):
        symbols = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model( self ):
        import torch
        from transformers import BigBirdConfig, BigBirdModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = """ """.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="""pt""" , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=False )
        config = BigBirdConfig(attention_type="""original_full""" )
        model = BigBirdModel(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
    @slow
    def test_special_tokens( self ):
        tokenizer = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
        decoded_text = tokenizer.decode(tokenizer("""Paris is the [MASK].""" ).input_ids )
        self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""" )
@slow
def _snake_case ( self ) -> Optional[int]:
# fmt: off
lowerCAmelCase = {"""input_ids""": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
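    # Quick usage sketch of the tokenizer exercised above (same public checkpoint;
    # [CLS]/[SEP], ids 65 and 66, are added automatically by the tokenizer):
    #   tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    #   ids = tokenizer("Paris is the [MASK].").input_ids
    #   tokenizer.decode(ids)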
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowercase :
def _snake_case ( self ) -> List[Any]:
torch.manual_seed(0 )
lowerCAmelCase = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowerCAmelCase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , thresholding=lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
lowerCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self ) -> int:
torch.manual_seed(0 )
lowerCAmelCase = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowerCAmelCase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , thresholding=lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
lowerCAmelCase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , )
torch.manual_seed(0 )
lowerCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
lowerCAmelCase = self.get_dummy_inputs(lowercase )
lowerCAmelCase = inputs["""prompt"""]
lowerCAmelCase = inputs["""generator"""]
lowerCAmelCase = inputs["""num_inference_steps"""]
lowerCAmelCase = inputs["""output_type"""]
if "image" in inputs:
lowerCAmelCase = inputs["""image"""]
else:
lowerCAmelCase = None
if "mask_image" in inputs:
lowerCAmelCase = inputs["""mask_image"""]
else:
lowerCAmelCase = None
if "original_image" in inputs:
lowerCAmelCase = inputs["""original_image"""]
else:
lowerCAmelCase = None
lowerCAmelCase , lowerCAmelCase = pipe.encode_prompt(lowercase )
# inputs with prompt converted to embeddings
lowerCAmelCase = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
lowerCAmelCase = image
if mask_image is not None:
lowerCAmelCase = mask_image
if original_image is not None:
lowerCAmelCase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowercase , lowercase , lowercase )
lowerCAmelCase = pipe(**lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowercase )
lowerCAmelCase = self.pipeline_class.from_pretrained(lowercase )
pipe_loaded.to(lowercase )
pipe_loaded.set_progress_bar_config(disable=lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowercase , lowercase ) is None , f'`{optional_component}` did not stay set to None after loading.' , )
lowerCAmelCase = self.get_dummy_inputs(lowercase )
lowerCAmelCase = inputs["""generator"""]
lowerCAmelCase = inputs["""num_inference_steps"""]
lowerCAmelCase = inputs["""output_type"""]
# inputs with prompt converted to embeddings
lowerCAmelCase = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
lowerCAmelCase = image
if mask_image is not None:
lowerCAmelCase = mask_image
if original_image is not None:
lowerCAmelCase = original_image
lowerCAmelCase = pipe_loaded(**lowercase )[0]
lowerCAmelCase = np.abs(to_np(lowercase ) - to_np(lowercase ) ).max()
self.assertLess(lowercase , 1e-4 )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
lowerCAmelCase = self.get_dummy_inputs(lowercase )
lowerCAmelCase = pipe(**lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowercase )
lowerCAmelCase = self.pipeline_class.from_pretrained(lowercase )
pipe_loaded.to(lowercase )
pipe_loaded.set_progress_bar_config(disable=lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowerCAmelCase = self.get_dummy_inputs(lowercase )
lowerCAmelCase = pipe_loaded(**lowercase )[0]
lowerCAmelCase = np.abs(to_np(lowercase ) - to_np(lowercase ) ).max()
self.assertLess(lowercase , 1e-4 )
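# Sketch of how a concrete test case consumes this mixin; the pipeline and base
# class names below are illustrative, not part of this file:
#
#   class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
#       pipeline_class = IFPipeline
#
#       def get_dummy_components(self):
#           return self._get_dummy_components()
#
#       def test_save_load_local(self):
#           self._test_save_load_local()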
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class lowercase :
def __init__( self , lowercase , ) -> Optional[int]:
lowerCAmelCase = parent
lowerCAmelCase = 13
lowerCAmelCase = 7
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = False
lowerCAmelCase = True
lowerCAmelCase = 99
lowerCAmelCase = 32
lowerCAmelCase = 2
lowerCAmelCase = 4
lowerCAmelCase = 37
lowerCAmelCase = """gelu"""
lowerCAmelCase = 0.1
lowerCAmelCase = 0.1
lowerCAmelCase = 512
lowerCAmelCase = 16
lowerCAmelCase = 2
lowerCAmelCase = 0.02
lowerCAmelCase = 3
lowerCAmelCase = 4
lowerCAmelCase = None
def _snake_case ( self ) -> str:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
lowerCAmelCase = TFDistilBertModel(config=lowercase )
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowerCAmelCase = model(lowercase )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
lowerCAmelCase = TFDistilBertForMaskedLM(config=lowercase )
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
lowerCAmelCase = TFDistilBertForQuestionAnswering(config=lowercase )
lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
}
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFDistilBertForSequenceClassification(lowercase )
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Any:
lowerCAmelCase = self.num_choices
lowerCAmelCase = TFDistilBertForMultipleChoice(lowercase )
lowerCAmelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
}
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFDistilBertForTokenClassification(lowercase )
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = self.prepare_config_and_inputs()
((lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase)) = config_and_inputs
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
_SCREAMING_SNAKE_CASE = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def _snake_case ( self ) -> Dict:
lowerCAmelCase = TFDistilBertModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowercase , dim=37 )
def _snake_case ( self ) -> str:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> int:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase )
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase )
def _snake_case ( self ) -> str:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase )
@slow
def _snake_case ( self ) -> List[str]:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
lowerCAmelCase = TFDistilBertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_tf
class lowercase ( unittest.TestCase ):
@slow
def _snake_case ( self ) -> Any:
lowerCAmelCase = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase = model(lowercase )[0]
lowerCAmelCase = [1, 6, 768]
self.assertEqual(output.shape , lowercase )
lowerCAmelCase = tf.constant(
[
[
[0.19_261_885, -0.13_732_955, 0.4_119_799],
[0.22_150_156, -0.07_422_661, 0.39_037_204],
[0.22_756_018, -0.0_896_414, 0.3_701_467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1e-4 )
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class lowercase ( _UpperCAmelCase ):
def __init__( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , **lowercase , ) -> Tuple:
lowerCAmelCase = path_or_paths
lowerCAmelCase = split if split or isinstance(lowercase , lowercase ) else """train"""
lowerCAmelCase = features
lowerCAmelCase = cache_dir
lowerCAmelCase = keep_in_memory
lowerCAmelCase = streaming
lowerCAmelCase = num_proc
lowerCAmelCase = kwargs
@abstractmethod
def _snake_case ( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass
class lowercase ( _UpperCAmelCase ):
def __init__( self , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , **lowercase , ) -> str:
lowerCAmelCase = features
lowerCAmelCase = cache_dir
lowerCAmelCase = keep_in_memory
lowerCAmelCase = streaming
lowerCAmelCase = num_proc
lowerCAmelCase = kwargs
@abstractmethod
def _snake_case ( self ) -> Union[Dataset, IterableDataset]:
pass
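# Minimal concrete reader, sketched only to show the contract; the real readers
# in this package (csv, json, parquet, ...) follow the same shape:
#
#   class DictReader(AbstractDatasetReader):
#       def read(self):
#           return Dataset.from_dict(self.kwargs["data"], features=self.features)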
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"vocab_file": "spiece.model"}
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
SCREAMING_SNAKE_CASE__ = {
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
def __init__( self , lowercase , lowercase=False , lowercase=False , lowercase=False , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase = None , **lowercase , ) -> None:
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase = kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
lowerCAmelCase = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowerCAmelCase = """<|endoftext|>""" if eos_token is None else eos_token
lowerCAmelCase = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowerCAmelCase = unk_token if pad_token is None else pad_token
lowerCAmelCase = eos_token if bos_token is None else bos_token
else:
lowerCAmelCase = """<pad>""" if pad_token is None else pad_token
lowerCAmelCase = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=lowercase , remove_space=lowercase , keep_accents=lowercase , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , pad_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = remove_space
lowerCAmelCase = keep_accents
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
# Used for whitespace normalization in input texts
# fmt : off
lowerCAmelCase = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowerCAmelCase = re.compile(
f'[{"".join(map(lowercase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]' )
def __getstate__( self ) -> Optional[int]:
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , lowercase ) -> str:
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def _snake_case ( self ) -> int:
return len(self.sp_model )
def _snake_case ( self , lowercase ) -> str:
lowerCAmelCase = self.non_printing_characters_re.sub("""""" , lowercase )
# Normalize whitespaces
lowerCAmelCase = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
lowerCAmelCase = unicodedata.normalize("""NFC""" , lowercase )
return text
def _snake_case ( self , lowercase , **lowercase ) -> List[str]:
lowerCAmelCase = self.preprocess_text(lowercase )
return self.sp_model.encode(lowercase , out_type=lowercase )
def _snake_case ( self , lowercase ) -> int:
return self.sp_model.PieceToId(lowercase )
def _snake_case ( self , lowercase ) -> str:
return self.sp_model.IdToPiece(lowercase )
@staticmethod
def _snake_case ( lowercase ) -> str:
return out_string
def _snake_case ( self , lowercase ) -> str:
lowerCAmelCase = []
lowerCAmelCase = """"""
lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase ) + token
lowerCAmelCase = True
lowerCAmelCase = []
else:
current_sub_tokens.append(lowercase )
lowerCAmelCase = False
out_string += self.sp_model.decode(lowercase )
return out_string
def _snake_case ( self ) -> Dict[str, int]:
lowerCAmelCase = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , lowercase , lowercase = None ) -> Tuple[str]:
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase = os.path.join(
lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , """wb""" ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
def _snake_case ( self , lowercase , lowercase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(lowercase , lowercase ):
lowerCAmelCase = self.preprocess_text(lowercase )
lowerCAmelCase = self.sp_model.encode(lowercase )
else:
lowerCAmelCase = [self.preprocess_text(lowercase ) for t in text]
lowerCAmelCase = self.sp_model.encode(lowercase )
if return_tensors is True or return_tensors == "pt":
lowerCAmelCase = torch.tensor(lowercase )
return token_ids
def _snake_case ( self , lowercase ) -> str:
return self.sp_model.decode(lowercase )
def _snake_case ( self , lowercase ) -> List[int]:
lowerCAmelCase = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
lowerCAmelCase = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(lowercase ) + f'{self.bos_token}Bot:'
)
return self.encode(text=lowercase )
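# Usage sketch (checkpoint taken from the map above; the resulting ids depend on
# the actual SentencePiece model, so none are shown):
#   tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tok.encode_fast("Träd är fina")  # plain list of ids, whitespace-normalized first
#   text = tok.decode_fast(ids)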
"""simple docstring"""
from math import isqrt, loga
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase = False
return [i for i in range(2 , SCREAMING_SNAKE_CASE ) if is_prime[i]]
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int = 80_08_00 , SCREAMING_SNAKE_CASE : int = 80_08_00 ):
'''simple docstring'''
lowerCAmelCase = degree * loga(SCREAMING_SNAKE_CASE )
lowerCAmelCase = int(SCREAMING_SNAKE_CASE )
lowerCAmelCase = calculate_prime_numbers(SCREAMING_SNAKE_CASE )
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = len(SCREAMING_SNAKE_CASE ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f'{solution() = }')
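# Cross-check sketch: the two-pointer sweep in `solution` relies on
# q*log2(p) + p*log2(q) growing monotonically in q for a fixed p.
# `brute_force_count` below is an illustrative helper (not part of the original
# solution) that recounts the same pairs directly, e.g.
# brute_force_count(20, 3) == solution(20, 3).
def brute_force_count(base: int, degree: int) -> int:
    upper_bound = degree * log2(base)
    primes = calculate_prime_numbers(int(upper_bound))
    return sum(
        1
        for i, p in enumerate(primes)
        for q in primes[i + 1 :]
        if q * log2(p) + p * log2(q) <= upper_bound
    )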
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
SCREAMING_SNAKE_CASE__ = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
SCREAMING_SNAKE_CASE__ = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
SCREAMING_SNAKE_CASE__ = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
return float((preds == labels).mean() )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
lowerCAmelCase = simple_accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase = float(fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=SCREAMING_SNAKE_CASE ) )
return {
"accuracy": acc,
"f1": fa,
}
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase = float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )
lowerCAmelCase = float(spearmanr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def _snake_case ( self ) -> List[str]:
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def _snake_case ( self , lowercase , lowercase ) -> Any:
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(lowercase , lowercase )}
elif self.config_name == "stsb":
return pearson_and_spearman(lowercase , lowercase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(lowercase , lowercase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(lowercase , lowercase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
"""simple docstring"""
from math import factorial
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int = 20 ):
'''simple docstring'''
lowerCAmelCase = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
lowerCAmelCase = n // 2
return int(factorial(SCREAMING_SNAKE_CASE ) / (factorial(SCREAMING_SNAKE_CASE ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
SCREAMING_SNAKE_CASE__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'imagegpt'
_SCREAMING_SNAKE_CASE = ['past_key_values']
_SCREAMING_SNAKE_CASE = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , lowercase=512 + 1 , lowercase=32 * 32 , lowercase=512 , lowercase=24 , lowercase=8 , lowercase=None , lowercase="quick_gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=1e-5 , lowercase=0.02 , lowercase=True , lowercase=True , lowercase=False , lowercase=False , lowercase=False , **lowercase , ) -> Any:
lowerCAmelCase = vocab_size
lowerCAmelCase = n_positions
lowerCAmelCase = n_embd
lowerCAmelCase = n_layer
lowerCAmelCase = n_head
lowerCAmelCase = n_inner
lowerCAmelCase = activation_function
lowerCAmelCase = resid_pdrop
lowerCAmelCase = embd_pdrop
lowerCAmelCase = attn_pdrop
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_range
lowerCAmelCase = scale_attn_weights
lowerCAmelCase = use_cache
lowerCAmelCase = scale_attn_by_inverse_layer_idx
lowerCAmelCase = reorder_and_upcast_attn
lowerCAmelCase = tie_word_embeddings
super().__init__(tie_word_embeddings=lowercase , **lowercase )
class lowercase ( _UpperCAmelCase ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
def _snake_case ( self , lowercase , lowercase = 1 , lowercase = -1 , lowercase = False , lowercase = None , lowercase = 3 , lowercase = 32 , lowercase = 32 , ) -> Mapping[str, Any]:
lowerCAmelCase = self._generate_dummy_images(lowercase , lowercase , lowercase , lowercase )
lowerCAmelCase = dict(preprocessor(images=lowercase , return_tensors=lowercase ) )
return inputs
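# Usage sketch: the defaults reproduce the "small" variant, and `attribute_map`
# lets generic code read the canonical names:
#   config = ImageGPTConfig()
#   assert config.hidden_size == config.n_embd  # resolved via attribute_map
#   assert config.vocab_size == 513  # 512 color clusters + 1 start token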
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
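# Sketch of how test_patching.py uses this module (the helper is assumed to live
# in datasets.utils.patching and to work as a context manager):
#   from datasets.utils.patching import patch_submodule
#   with patch_submodule(_test_patching, "os.path.join", lambda *args: "mock"):
#       ...  # os.path.join is patched inside this module only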
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str = "cpu" , SCREAMING_SNAKE_CASE : Union[str, None] = None ):
'''simple docstring'''
lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE , map_location=SCREAMING_SNAKE_CASE )
for k, v in tqdm(state_dict.items() ):
if not isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
lowerCAmelCase = v.half()
if save_path is None: # overwrite src_path
lowerCAmelCase = src_path
torch.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
fire.Fire(convert)
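# Example invocation via python-fire (the script filename is illustrative):
#   python convert_model_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
# Omitting --save_path converts the checkpoint in place.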
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
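# Example invocation once this command is registered on the `transformers-cli`
# entry point (the model name and file paths are illustrative; the csv format is
# inferred from the input extension):
#   transformers-cli run --task text-classification \
#       --model distilbert-base-uncased-finetuned-sst-2-english \
#       --input reviews.csv --column text --output predictions.txt --overwrite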
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        timestep=None,
        cross_attention_kwargs=None,
        class_labels=None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states


class FeedForward(nn.Module):
    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states


class GELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


class GEGLU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)


class ApproximateGELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)


class AdaLayerNorm(nn.Module):
    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x


class AdaLayerNormZero(nn.Module):
    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


class AdaGroupNorm(nn.Module):
    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
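# Instantiation sketch for the block defined above; the sizes are arbitrary
# illustrative values:
#   block = BasicTransformerBlock(dim=32, num_attention_heads=4, attention_head_dim=8)
#   hidden_states = torch.randn(2, 16, 32)  # (batch, tokens, channels)
#   out = block(hidden_states)  # same shape as the input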
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = False, False, False
@dataclass
class lowercase :
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = None
# Automatically constructed
_SCREAMING_SNAKE_CASE = "dict"
_SCREAMING_SNAKE_CASE = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
_SCREAMING_SNAKE_CASE = field(default='Audio' , init=_UpperCAmelCase , repr=_UpperCAmelCase )
def __call__( self ) -> Union[str, Any]:
return self.pa_type
def _snake_case ( self , lowercase ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(lowercase , lowercase ):
return {"bytes": None, "path": value}
elif isinstance(lowercase , lowercase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
lowerCAmelCase = BytesIO()
sf.write(lowercase , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
lowerCAmelCase = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 32_767
else:
lowerCAmelCase = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 32_767
lowerCAmelCase = BytesIO(bytes() )
sf.write(lowercase , lowercase , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def _snake_case ( self , lowercase , lowercase = None ) -> dict:
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
lowerCAmelCase , lowerCAmelCase = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
lowerCAmelCase = xsplitext(lowercase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
lowerCAmelCase = token_per_repo_id or {}
lowerCAmelCase = path.split("""::""" )[-1]
try:
lowerCAmelCase = string_to_dict(lowercase , config.HUB_DATASETS_URL )["""repo_id"""]
lowerCAmelCase = token_per_repo_id[repo_id]
except (ValueError, KeyError):
lowerCAmelCase = None
with xopen(lowercase , """rb""" , use_auth_token=lowercase ) as f:
lowerCAmelCase , lowerCAmelCase = sf.read(lowercase )
else:
lowerCAmelCase , lowerCAmelCase = sf.read(lowercase )
lowerCAmelCase = array.T
if self.mono:
lowerCAmelCase = librosa.to_mono(lowercase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
lowerCAmelCase = librosa.resample(lowercase , orig_sr=lowercase , target_sr=self.sampling_rate )
lowerCAmelCase = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _snake_case ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def _snake_case ( self , lowercase ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
lowerCAmelCase = pa.array([None] * len(lowercase ) , type=pa.binary() )
lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCAmelCase = pa.array([None] * len(lowercase ) , type=pa.string() )
lowerCAmelCase = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
lowerCAmelCase = pa.array([Audio().encode_example(lowercase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
lowerCAmelCase = storage.field("""bytes""" )
else:
lowerCAmelCase = pa.array([None] * len(lowercase ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
lowerCAmelCase = storage.field("""path""" )
else:
lowerCAmelCase = pa.array([None] * len(lowercase ) , type=pa.string() )
lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(lowercase , self.pa_type )
def _snake_case ( self , lowercase ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(lowercase ):
with xopen(lowercase , """rb""" ) as f:
lowerCAmelCase = f.read()
return bytes_
lowerCAmelCase = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCAmelCase = pa.array(
[os.path.basename(lowercase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase , self.pa_type )
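# --- Illustrative sketch (not part of the feature above): how the {bytes, path}
# struct storage manipulated by cast_storage/embed_storage can be built by hand
# with pyarrow. The field names mirror the code above; the sample values are
# invented for the example.
import pyarrow as pa
paths = ["a.wav", None, "c.wav"]
payloads = [b"\x00\x01", b"\x02\x03", None]
bytes_array = pa.array(payloads, type=pa.binary())
path_array = pa.array(paths, type=pa.string())
struct_array = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"])
rows = struct_array.to_pylist()  # [{'bytes': b'\x00\x01', 'path': 'a.wav'}, ...]
assert rows[0]["path"] == "a.wav"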
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
@dataclass
class lowercase :
_SCREAMING_SNAKE_CASE = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_SCREAMING_SNAKE_CASE = field(
default=_UpperCAmelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
_SCREAMING_SNAKE_CASE = field(
default=_UpperCAmelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
_SCREAMING_SNAKE_CASE = field(
default=_UpperCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_SCREAMING_SNAKE_CASE = field(
default=_UpperCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_SCREAMING_SNAKE_CASE = field(
default=_UpperCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
@dataclass
class lowercase :
_SCREAMING_SNAKE_CASE = field(
default=_UpperCAmelCase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_SCREAMING_SNAKE_CASE = field(
default=_UpperCAmelCase , metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'} )
_SCREAMING_SNAKE_CASE = field(
default=_UpperCAmelCase , metadata={'help': 'Train language if it is different from the evaluation language.'} )
_SCREAMING_SNAKE_CASE = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_SCREAMING_SNAKE_CASE = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_SCREAMING_SNAKE_CASE = field(
default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
_SCREAMING_SNAKE_CASE = field(
default=_UpperCAmelCase , metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'} , )
_SCREAMING_SNAKE_CASE = field(
default=_UpperCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
_SCREAMING_SNAKE_CASE = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_SCREAMING_SNAKE_CASE = field(
default=_UpperCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_SCREAMING_SNAKE_CASE = field(
default=_UpperCAmelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_xnli""" , SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowerCAmelCase = load_dataset(
"""xnli""" , model_args.language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowerCAmelCase = load_dataset(
"""xnli""" , model_args.train_language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase = train_dataset.features["""label"""].names
if training_args.do_eval:
lowerCAmelCase = load_dataset(
"""xnli""" , model_args.language , split="""validation""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase = eval_dataset.features["""label"""].names
if training_args.do_predict:
lowerCAmelCase = load_dataset(
"""xnli""" , model_args.language , split="""test""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase = predict_dataset.features["""label"""].names
# Labels
lowerCAmelCase = len(SCREAMING_SNAKE_CASE )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE , id2label={str(i ): label for i, label in enumerate(SCREAMING_SNAKE_CASE )} , label2id={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE )} , finetuning_task="""xnli""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowerCAmelCase = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCAmelCase = False
def preprocess_function(SCREAMING_SNAKE_CASE : Dict ):
# Tokenize the texts
return tokenizer(
examples["""premise"""] , examples["""hypothesis"""] , padding=SCREAMING_SNAKE_CASE , max_length=data_args.max_seq_length , truncation=SCREAMING_SNAKE_CASE , )
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCAmelCase = min(len(SCREAMING_SNAKE_CASE ) , data_args.max_train_samples )
lowerCAmelCase = train_dataset.select(range(SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
lowerCAmelCase = train_dataset.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on train dataset""" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(SCREAMING_SNAKE_CASE ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCAmelCase = min(len(SCREAMING_SNAKE_CASE ) , data_args.max_eval_samples )
lowerCAmelCase = eval_dataset.select(range(SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
lowerCAmelCase = eval_dataset.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on validation dataset""" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
lowerCAmelCase = min(len(SCREAMING_SNAKE_CASE ) , data_args.max_predict_samples )
lowerCAmelCase = predict_dataset.select(range(SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc="""prediction dataset map pre-processing""" ):
lowerCAmelCase = predict_dataset.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on prediction dataset""" , )
# Get the metric function
lowerCAmelCase = evaluate.load("""xnli""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE : EvalPrediction ):
lowerCAmelCase = p.predictions[0] if isinstance(p.predictions , SCREAMING_SNAKE_CASE ) else p.predictions
lowerCAmelCase = np.argmax(SCREAMING_SNAKE_CASE , axis=1 )
return metric.compute(predictions=SCREAMING_SNAKE_CASE , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCAmelCase = default_data_collator
elif training_args.fp16:
lowerCAmelCase = DataCollatorWithPadding(SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 )
else:
lowerCAmelCase = None
# Initialize our Trainer
lowerCAmelCase = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
lowerCAmelCase = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase = last_checkpoint
lowerCAmelCase = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE )
lowerCAmelCase = train_result.metrics
lowerCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE )
)
lowerCAmelCase = min(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , SCREAMING_SNAKE_CASE )
trainer.save_metrics("""train""" , SCREAMING_SNAKE_CASE )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowerCAmelCase = trainer.evaluate(eval_dataset=SCREAMING_SNAKE_CASE )
lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE )
lowerCAmelCase = min(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
trainer.log_metrics("""eval""" , SCREAMING_SNAKE_CASE )
trainer.save_metrics("""eval""" , SCREAMING_SNAKE_CASE )
# Prediction
if training_args.do_predict:
logger.info("""*** Predict ***""" )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = trainer.predict(SCREAMING_SNAKE_CASE , metric_key_prefix="""predict""" )
lowerCAmelCase = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(SCREAMING_SNAKE_CASE )
)
lowerCAmelCase = min(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
trainer.log_metrics("""predict""" , SCREAMING_SNAKE_CASE )
trainer.save_metrics("""predict""" , SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.argmax(SCREAMING_SNAKE_CASE , axis=1 )
lowerCAmelCase = os.path.join(training_args.output_dir , """predictions.txt""" )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE , """w""" ) as writer:
writer.write("""index\tprediction\n""" )
for index, item in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase = label_list[item]
writer.write(F'{index}\t{item}\n' )
if __name__ == "__main__":
main()
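# --- Illustrative sketch: the compute_metrics contract used above, shown on toy
# data. EvalPrediction bundles raw logits and gold label ids; accuracy is just
# argmax-then-compare. The values here are made up for the example.
import numpy as np
logits = np.array([[0.1, 0.7, 0.2], [0.8, 0.1, 0.1]])
label_ids = np.array([1, 0])
preds = np.argmax(logits, axis=1)
accuracy = float((preds == label_ids).mean())
assert accuracy == 1.0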
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith("""module.encoder""" ):
lowerCAmelCase = key.replace("""module.encoder""" , """glpn.encoder""" )
if key.startswith("""module.decoder""" ):
lowerCAmelCase = key.replace("""module.decoder""" , """decoder.stages""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
lowerCAmelCase = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(SCREAMING_SNAKE_CASE )-1}' )
if "norm" in key:
lowerCAmelCase = key.replace("""norm""" , """layer_norm""" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
lowerCAmelCase = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(SCREAMING_SNAKE_CASE )-1}' )
if "layer_norm1" in key:
lowerCAmelCase = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
lowerCAmelCase = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase = key[key.find("""block""" ) + len("""block""" )]
lowerCAmelCase = key.replace(F'block{idx}' , F'block.{int(SCREAMING_SNAKE_CASE )-1}' )
if "attn.q" in key:
lowerCAmelCase = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
lowerCAmelCase = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
lowerCAmelCase = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
lowerCAmelCase = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
lowerCAmelCase = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
lowerCAmelCase = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
lowerCAmelCase = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
lowerCAmelCase = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase = key[key.find("""linear_c""" ) + len("""linear_c""" )]
lowerCAmelCase = key.replace(F'linear_c{idx}' , F'linear_c.{int(SCREAMING_SNAKE_CASE )-1}' )
if "bot_conv" in key:
lowerCAmelCase = key.replace("""bot_conv""" , """0.convolution""" )
if "skip_conv1" in key:
lowerCAmelCase = key.replace("""skip_conv1""" , """1.convolution""" )
if "skip_conv2" in key:
lowerCAmelCase = key.replace("""skip_conv2""" , """2.convolution""" )
if "fusion1" in key:
lowerCAmelCase = key.replace("""fusion1""" , """1.fusion""" )
if "fusion2" in key:
lowerCAmelCase = key.replace("""fusion2""" , """2.fusion""" )
if "fusion3" in key:
lowerCAmelCase = key.replace("""fusion3""" , """3.fusion""" )
if "fusion" in key and "conv" in key:
lowerCAmelCase = key.replace("""conv""" , """convolutional_layer""" )
if key.startswith("""module.last_layer_depth""" ):
lowerCAmelCase = key.replace("""module.last_layer_depth""" , """head.head""" )
lowerCAmelCase = value
return new_state_dict
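# --- Illustrative sketch: the renaming pass above boils down to rebuilding an
# OrderedDict with rewritten keys. The key names below are invented for the example.
from collections import OrderedDict
old = OrderedDict([("module.encoder.block1.weight", 1), ("module.decoder.fc1.bias", 2)])
new = OrderedDict()
for key, value in old.items():
    key = key.replace("module.encoder", "glpn.encoder").replace("module.decoder", "decoder.stages")
    new[key] = value
assert "glpn.encoder.block1.weight" in new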
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
lowerCAmelCase = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase = kv_bias[config.hidden_sizes[i] :]
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return image
@torch.no_grad()
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
'''simple docstring'''
lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase = GLPNImageProcessor()
# prepare image
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
logger.info("""Converting model...""" )
# load original state dict
lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device("""cpu""" ) )
# rename keys
lowerCAmelCase = rename_keys(SCREAMING_SNAKE_CASE )
# key and value matrices need special treatment
read_in_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
lowerCAmelCase = GLPNForDepthEstimation(SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
# forward pass
lowerCAmelCase = model(SCREAMING_SNAKE_CASE )
lowerCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase = torch.tensor(
[[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
elif "kitti" in model_name:
lowerCAmelCase = torch.tensor(
[[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
else:
raise ValueError(F'Unknown model name: {model_name}' )
lowerCAmelCase = torch.Size([1, 4_80, 6_40] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 )
print("""Looks ok!""" )
# finally, push to hub if required
if push_to_hub:
logger.info("""Pushing model and image processor to the hub...""" )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=SCREAMING_SNAKE_CASE , )
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCAmelCase = 1_92
lowerCAmelCase = 7_68
lowerCAmelCase = 12
lowerCAmelCase = 3
lowerCAmelCase = [8_00, 13_33]
lowerCAmelCase = False
elif yolos_name == "yolos_s_dWr":
lowerCAmelCase = 3_30
lowerCAmelCase = 14
lowerCAmelCase = 6
lowerCAmelCase = 13_20
elif "yolos_s" in yolos_name:
lowerCAmelCase = 3_84
lowerCAmelCase = 15_36
lowerCAmelCase = 12
lowerCAmelCase = 6
elif "yolos_b" in yolos_name:
lowerCAmelCase = [8_00, 13_44]
lowerCAmelCase = 91
lowerCAmelCase = """huggingface/label-files"""
lowerCAmelCase = """coco-detection-id2label.json"""
lowerCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase = {int(k ): v for k, v in idalabel.items()}
lowerCAmelCase = idalabel
lowerCAmelCase = {v: k for k, v in idalabel.items()}
return config
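# --- Illustrative sketch: how the id2label / label2id mappings populated above
# relate to each other. The label names are invented for the example.
labels = ["person", "bicycle", "car"]
id2label = {i: label for i, label in enumerate(labels)}
label2id = {label: i for i, label in enumerate(labels)}
assert label2id[id2label[2]] == 2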
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : YolosConfig , SCREAMING_SNAKE_CASE : bool = False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
lowerCAmelCase = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase = in_proj_weight[: config.hidden_size, :]
lowerCAmelCase = in_proj_bias[: config.hidden_size]
lowerCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase = in_proj_weight[-config.hidden_size :, :]
lowerCAmelCase = in_proj_bias[-config.hidden_size :]
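# --- Illustrative sketch: the fused-qkv split performed above, on a tiny random
# tensor. hidden_size is an assumption for the example; the slicing matches the
# query/key/value row blocks used in the function.
import torch
hidden_size = 4
qkv_weight = torch.randn(3 * hidden_size, hidden_size)
q = qkv_weight[:hidden_size, :]
k = qkv_weight[hidden_size : hidden_size * 2, :]
v = qkv_weight[-hidden_size:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)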
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if "backbone" in name:
lowerCAmelCase = name.replace("""backbone""" , """vit""" )
if "cls_token" in name:
lowerCAmelCase = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "det_token" in name:
lowerCAmelCase = name.replace("""det_token""" , """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
lowerCAmelCase = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
lowerCAmelCase = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
lowerCAmelCase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
lowerCAmelCase = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
lowerCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowerCAmelCase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowerCAmelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowerCAmelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowerCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "class_embed" in name:
lowerCAmelCase = name.replace("""class_embed""" , """class_labels_classifier""" )
if "bbox_embed" in name:
lowerCAmelCase = name.replace("""bbox_embed""" , """bbox_predictor""" )
if "vit.norm" in name:
lowerCAmelCase = name.replace("""vit.norm""" , """vit.layernorm""" )
return name
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : YolosForObjectDetection ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase = orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if "qkv" in key:
lowerCAmelCase = key.split(""".""" )
lowerCAmelCase = int(key_split[2] )
lowerCAmelCase = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCAmelCase = val[:dim, :]
lowerCAmelCase = val[
dim : dim * 2, :
]
lowerCAmelCase = val[-dim:, :]
else:
lowerCAmelCase = val[:dim]
lowerCAmelCase = val[dim : dim * 2]
lowerCAmelCase = val[-dim:]
else:
lowerCAmelCase = val
return orig_state_dict
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool = False ):
'''simple docstring'''
lowerCAmelCase = get_yolos_config(SCREAMING_SNAKE_CASE )
# load original state_dict
lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE , map_location="""cpu""" )["""model"""]
# load 🤗 model
lowerCAmelCase = YolosForObjectDetection(SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = convert_state_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCAmelCase = 8_00 if yolos_name != """yolos_ti""" else 5_12
lowerCAmelCase = YolosImageProcessor(format="""coco_detection""" , size=SCREAMING_SNAKE_CASE )
lowerCAmelCase = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowerCAmelCase = model(**SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase = outputs.logits, outputs.pred_boxes
lowerCAmelCase , lowerCAmelCase = None, None
if yolos_name == "yolos_ti":
lowerCAmelCase = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
lowerCAmelCase = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
lowerCAmelCase = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
lowerCAmelCase = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
lowerCAmelCase = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
lowerCAmelCase = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
lowerCAmelCase = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
lowerCAmelCase = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
lowerCAmelCase = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
lowerCAmelCase = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(F'Unknown yolos_name: {yolos_name}' )
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
lowerCAmelCase = {
"""yolos_ti""": """yolos-tiny""",
"""yolos_s_200_pre""": """yolos-small""",
"""yolos_s_300_pre""": """yolos-small-300""",
"""yolos_s_dWr""": """yolos-small-dwr""",
"""yolos_base""": """yolos-base""",
}
print("""Pushing to the hub...""" )
lowerCAmelCase = model_mapping[yolos_name]
image_processor.push_to_hub(SCREAMING_SNAKE_CASE , organization="""hustvl""" )
model.push_to_hub(SCREAMING_SNAKE_CASE , organization="""hustvl""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
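# --- Illustrative sketch: the tolerance check used for the logit/box slices
# above, shown on toy tensors. The values are made up for the example.
import torch
produced = torch.tensor([[0.40210, 0.08361]])
expected = torch.tensor([[0.4021, 0.0836]])
assert torch.allclose(produced, expected, atol=1e-4)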
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowercase :
def _snake_case ( self ) -> List[Any]:
torch.manual_seed(0 )
lowerCAmelCase = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowerCAmelCase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , thresholding=lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
lowerCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self ) -> int:
torch.manual_seed(0 )
lowerCAmelCase = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowerCAmelCase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , thresholding=lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
lowerCAmelCase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , )
torch.manual_seed(0 )
lowerCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
lowerCAmelCase = self.get_dummy_inputs(lowercase )
lowerCAmelCase = inputs["""prompt"""]
lowerCAmelCase = inputs["""generator"""]
lowerCAmelCase = inputs["""num_inference_steps"""]
lowerCAmelCase = inputs["""output_type"""]
if "image" in inputs:
lowerCAmelCase = inputs["""image"""]
else:
lowerCAmelCase = None
if "mask_image" in inputs:
lowerCAmelCase = inputs["""mask_image"""]
else:
lowerCAmelCase = None
if "original_image" in inputs:
lowerCAmelCase = inputs["""original_image"""]
else:
lowerCAmelCase = None
lowerCAmelCase , lowerCAmelCase = pipe.encode_prompt(lowercase )
# inputs with prompt converted to embeddings
lowerCAmelCase = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
lowerCAmelCase = image
if mask_image is not None:
lowerCAmelCase = mask_image
if original_image is not None:
lowerCAmelCase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowercase , lowercase , lowercase )
lowerCAmelCase = pipe(**lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowercase )
lowerCAmelCase = self.pipeline_class.from_pretrained(lowercase )
pipe_loaded.to(lowercase )
pipe_loaded.set_progress_bar_config(disable=lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowercase , lowercase ) is None , f'`{optional_component}` did not stay set to None after loading.' , )
lowerCAmelCase = self.get_dummy_inputs(lowercase )
lowerCAmelCase = inputs["""generator"""]
lowerCAmelCase = inputs["""num_inference_steps"""]
lowerCAmelCase = inputs["""output_type"""]
# inputs with prompt converted to embeddings
lowerCAmelCase = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
lowerCAmelCase = image
if mask_image is not None:
lowerCAmelCase = mask_image
if original_image is not None:
lowerCAmelCase = original_image
lowerCAmelCase = pipe_loaded(**lowercase )[0]
lowerCAmelCase = np.abs(to_np(lowercase ) - to_np(lowercase ) ).max()
self.assertLess(lowercase , 1e-4 )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
lowerCAmelCase = self.get_dummy_inputs(lowercase )
lowerCAmelCase = pipe(**lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowercase )
lowerCAmelCase = self.pipeline_class.from_pretrained(lowercase )
pipe_loaded.to(lowercase )
pipe_loaded.set_progress_bar_config(disable=lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowerCAmelCase = self.get_dummy_inputs(lowercase )
lowerCAmelCase = pipe_loaded(**lowercase )[0]
lowerCAmelCase = np.abs(to_np(lowercase ) - to_np(lowercase ) ).max()
self.assertLess(lowercase , 1e-4 )
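# --- Illustrative sketch: the save/load determinism checks above compare outputs
# by their maximum absolute difference. The arrays here are invented for the example.
import numpy as np
before = np.array([0.25, 0.50, 0.75])
after = np.array([0.25, 0.50, 0.75 + 5e-5])
max_diff = np.abs(before - after).max()
assert max_diff < 1e-4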
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
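# --- Illustrative sketch (independent of the file above): the core idea behind
# _LazyModule is to defer the real import until an attribute is first accessed,
# e.g. via a module-level __getattr__ hook (PEP 562). A minimal stand-in using
# only the stdlib; the actual _LazyModule internals differ.
import importlib
_import_structure = {"json": ["loads"]}
def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)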
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'summarization'
_SCREAMING_SNAKE_CASE = ['loss']
_SCREAMING_SNAKE_CASE = ROUGE_KEYS
_SCREAMING_SNAKE_CASE = 'rouge2'
def __init__( self , lowercase , **lowercase ) -> str:
if hparams.sortish_sampler and hparams.gpus > 1:
lowerCAmelCase = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
lowerCAmelCase = Path(self.output_dir ) / """metrics.json"""
lowerCAmelCase = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
lowerCAmelCase = 0
lowerCAmelCase = defaultdict(lowercase )
lowerCAmelCase = self.config.model_type
lowerCAmelCase = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
lowerCAmelCase = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
lowerCAmelCase = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
lowerCAmelCase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
lowerCAmelCase = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
lowerCAmelCase = get_git_info()["""repo_sha"""]
lowerCAmelCase = hparams.num_workers
lowerCAmelCase = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ):
lowerCAmelCase = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
lowerCAmelCase = self.decoder_start_token_id
lowerCAmelCase = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
lowerCAmelCase = False
lowerCAmelCase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
lowerCAmelCase = self.hparams.eval_max_gen_length
else:
lowerCAmelCase = self.model.config.max_length
lowerCAmelCase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def _snake_case ( self , lowercase ) -> Dict[str, List[str]]:
lowerCAmelCase = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(lowercase , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
lowerCAmelCase = True
return readable_batch
def _snake_case ( self , lowercase , **lowercase ) -> Union[str, Any]:
return self.model(lowercase , **lowercase )
def _snake_case ( self , lowercase ) -> Union[str, Any]:
lowerCAmelCase = self.tokenizer.batch_decode(
lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
return lmap(str.strip , lowercase )
def _snake_case ( self , lowercase ) -> Tuple:
lowerCAmelCase = self.tokenizer.pad_token_id
lowerCAmelCase , lowerCAmelCase = batch["""input_ids"""], batch["""attention_mask"""]
lowerCAmelCase = batch["""labels"""]
if isinstance(self.model , lowercase ):
lowerCAmelCase = self.model._shift_right(lowercase )
else:
lowerCAmelCase = shift_tokens_right(lowercase , lowercase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
lowerCAmelCase = decoder_input_ids
self.save_readable_batch(lowercase )
lowerCAmelCase = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase )
lowerCAmelCase = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
lowerCAmelCase = nn.CrossEntropyLoss(ignore_index=lowercase )
assert lm_logits.shape[-1] == self.vocab_size
lowerCAmelCase = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
lowerCAmelCase = nn.functional.log_softmax(lowercase , dim=-1 )
lowerCAmelCase , lowerCAmelCase = label_smoothed_nll_loss(
lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase )
return (loss,)
@property
def _snake_case ( self ) -> int:
return self.tokenizer.pad_token_id
def _snake_case ( self , lowercase , lowercase ) -> Dict:
lowerCAmelCase = self._step(lowercase )
lowerCAmelCase = dict(zip(self.loss_names , lowercase ) )
# tokens per batch
lowerCAmelCase = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
lowerCAmelCase = batch["""input_ids"""].shape[0]
lowerCAmelCase = batch["""input_ids"""].eq(self.pad ).sum()
lowerCAmelCase = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def _snake_case ( self , lowercase , lowercase ) -> Dict:
return self._generative_step(lowercase )
def _snake_case ( self , lowercase , lowercase="val" ) -> Dict:
self.step_count += 1
lowerCAmelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
lowerCAmelCase = losses["""loss"""]
lowerCAmelCase = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
lowerCAmelCase = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
lowerCAmelCase = torch.tensor(lowercase ).type_as(lowercase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowercase )
lowerCAmelCase = {f'{prefix}_avg_{k}': x for k, x in losses.items()}
lowerCAmelCase = self.step_count
self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path
lowerCAmelCase = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'{prefix}_loss': loss,
f'{prefix}_{self.val_metric}': metric_tensor,
}
def _snake_case ( self , lowercase , lowercase ) -> Dict:
return calculate_rouge(lowercase , lowercase )
def _snake_case ( self , lowercase ) -> dict:
lowerCAmelCase = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
lowerCAmelCase = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
lowerCAmelCase = (time.time() - ta) / batch["""input_ids"""].shape[0]
lowerCAmelCase = self.ids_to_clean_text(lowercase )
lowerCAmelCase = self.ids_to_clean_text(batch["""labels"""] )
lowerCAmelCase = self._step(lowercase )
lowerCAmelCase = dict(zip(self.loss_names , lowercase ) )
lowerCAmelCase = self.calc_generative_metrics(lowercase , lowercase )
lowerCAmelCase = np.mean(lmap(lowercase , lowercase ) )
base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase )
return base_metrics
def _snake_case ( self , lowercase , lowercase ) -> Dict:
return self._generative_step(lowercase )
def _snake_case ( self , lowercase ) -> int:
return self.validation_epoch_end(lowercase , prefix="""test""" )
def _snake_case ( self , lowercase ) -> SeqaSeqDataset:
lowerCAmelCase = self.n_obs[type_path]
lowerCAmelCase = self.target_lens[type_path]
lowerCAmelCase = self.dataset_class(
self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , )
return dataset
def _snake_case ( self , lowercase , lowercase , lowercase = False ) -> DataLoader:
lowerCAmelCase = self.get_dataset(lowercase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
lowerCAmelCase = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
lowerCAmelCase = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
def _snake_case ( self ) -> DataLoader:
lowerCAmelCase = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=lowercase )
return dataloader
def _snake_case ( self ) -> DataLoader:
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def _snake_case ( self ) -> DataLoader:
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def _snake_case ( lowercase , lowercase ) -> Optional[int]:
BaseTransformer.add_model_specific_args(lowercase , lowercase )
add_generic_args(lowercase , lowercase )
parser.add_argument(
"""--max_source_length""" , default=1_024 , type=lowercase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=56 , type=lowercase , help=(
"""The maximum total target sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=142 , type=lowercase , help=(
"""The maximum total target sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=142 , type=lowercase , help=(
"""The maximum total target sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=lowercase )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=lowercase )
parser.add_argument("""--max_tokens_per_batch""" , type=lowercase , default=lowercase )
parser.add_argument("""--logger_name""" , type=lowercase , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=lowercase , default=-1 , required=lowercase , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=lowercase , default=500 , required=lowercase , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=lowercase , default=-1 , required=lowercase , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=lowercase , default="""summarization""" , required=lowercase , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=lowercase , default=0.0 , required=lowercase )
parser.add_argument("""--src_lang""" , type=lowercase , default="""""" , required=lowercase )
parser.add_argument("""--tgt_lang""" , type=lowercase , default="""""" , required=lowercase )
parser.add_argument("""--eval_beams""" , type=lowercase , default=lowercase , required=lowercase )
parser.add_argument(
"""--val_metric""" , type=lowercase , default=lowercase , required=lowercase , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=lowercase , default=lowercase , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=lowercase , default=1 , required=lowercase , help="""How many checkpoints to save""" )
        parser.add_argument(
            """--early_stopping_patience""" , type=lowercase , default=-1 , required=lowercase , help=(
                """-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
                """ val_check_interval will affect it."""
            ) , )
return parser
class TranslationModule( SummarizationModule ):
    mode = 'translation'
    loss_names = ['loss']
    metric_names = ['bleu']
    default_val_metric = 'bleu'
    def __init__( self , hparams , **kwargs ) -> None:
        super().__init__(hparams , **kwargs )
        self.dataset_kwargs["""src_lang"""] = hparams.src_lang
        self.dataset_kwargs["""tgt_lang"""] = hparams.tgt_lang
    def calc_generative_metrics( self , preds , target ) -> dict:
        return calculate_bleu(preds , target )
def main( args , model=None ):
    '''simple docstring'''
    Path(args.output_dir ).mkdir(exist_ok=True )
    check_output_dir(args , expected_items=3 )
    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args )
        else:
            model = TranslationModule(args )
    dataset = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith("""/tmp""" )
        or str(args.output_dir ).startswith("""/var""" )
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger
        project = os.environ.get("""WANDB_PROJECT""" , dataset )
        logger = WandbLogger(name=model.output_dir.name , project=project )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger
        logger = WandbLogger(name=model.output_dir.name , project=F'hf_{dataset}' )
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
    else:
        es_callback = False
    lower_is_better = args.val_metric == """loss"""
    trainer = generic_train(
        model , args , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
            args.output_dir , model.val_metric , args.save_top_k , lower_is_better ) , early_stopping_callback=es_callback , logger=logger , )
    pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = """"""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=True ) )
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
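# A minimal invocation sketch (added; not part of the original script -- the
# checkpoint name and the data_dir layout of {train,val,test}.{source,target}
# files are assumptions for illustration):
#
#   python finetune.py \
#       --model_name_or_path sshleifer/distilbart-xsum-12-3 \
#       --data_dir ./xsum --output_dir ./checkpoints \
#       --do_train --do_predict --task summarization --sortish_sampler --gpus 1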
"""simple docstring"""
def solution( pence: int = 2_00 ) -> int:
    '''simple docstring'''
    coins = [1, 2, 5, 10, 20, 50, 1_00, 2_00]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
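# Worked example (added for illustration): for pence = 4 only the 1p and 2p coins
# contribute, and the three ways are 1+1+1+1, 1+1+2 and 2+2 -- i.e. solution(4) == 3.
# Processing the coins one at a time is what makes this count combinations rather
# than ordered sequences of coins.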
if __name__ == "__main__":
assert solution(200) == 73_682
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class LanguageModeling( TaskTemplate ):
    task: str = field(default='language-modeling' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'text': Value('string' )} )
    label_schema: ClassVar[Features] = Features({} )
    text_column: str = "text"
    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {self.text_column: "text"}
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("""-f""" )
    args = parser.parse_args()
    return args.f
def get_results( output_dir ):
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir , """all_results.json""" )
    if os.path.exists(path ):
        with open(path , """r""" ) as f:
            results = json.load(f )
    else:
        raise ValueError(F'can\'t find {path}' )
    return results
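# Illustration (added): if <output_dir>/all_results.json contains
# {"eval_accuracy": 0.82, "perplexity": 35.1}, get_results returns that dict and
# the test assertions below index into it by metric name.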
def is_cuda_and_apex_available():
    '''simple docstring'''
    is_using_cuda = torch.cuda.is_available() and torch_device == """cuda"""
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer( TestCasePlus ):
@classmethod
def _snake_case ( cls ) -> List[str]:
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = os.path.join(cls.tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
lowerCAmelCase = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def _snake_case ( cls ) -> List[Any]:
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(lowercase )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(lowercase , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase , """glue_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self ) -> int:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(lowercase )
self.assertLess(result["""perplexity"""] , 100 )
self.assertTrue(os.path.exists(os.path.join(lowercase , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase , """clm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(lowercase )
self.assertLess(result["""perplexity"""] , 42 )
self.assertTrue(os.path.exists(os.path.join(lowercase , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase , """mlm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self ) -> Dict:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
lowerCAmelCase = 7 if get_gpu_count() > 1 else 2
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(lowercase )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
self.assertLess(result["""train_loss"""] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(lowercase , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase , """ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(lowercase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["""eval_f1"""] , 28 )
self.assertGreaterEqual(result["""eval_exact"""] , 28 )
self.assertTrue(os.path.exists(os.path.join(lowercase , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase , """qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(lowercase )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(lowercase , """swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(lowercase )
self.assertGreaterEqual(result["""eval_rouge1"""] , 10 )
self.assertGreaterEqual(result["""eval_rouge2"""] , 2 )
self.assertGreaterEqual(result["""eval_rougeL"""] , 7 )
self.assertGreaterEqual(result["""eval_rougeLsum"""] , 7 )
self.assertTrue(os.path.exists(os.path.join(lowercase , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase , """summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self ) -> str:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(lowercase )
self.assertGreaterEqual(result["""eval_bleu"""] , 30 )
self.assertTrue(os.path.exists(os.path.join(lowercase , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase , """translation_no_trainer""" ) ) )
@slow
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = logging.StreamHandler(sys.stdout )
logger.addHandler(lowercase )
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(lowercase )
self.assertGreaterEqual(result["""eval_overall_accuracy"""] , 0.10 )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(lowercase )
# The base model scores a 25%
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(lowercase , """step_1""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase , """image_classification_no_trainer""" ) ) )
"""simple docstring"""
import re
import string
import numpy as np
import datasets
SCREAMING_SNAKE_CASE__ = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
SCREAMING_SNAKE_CASE__ = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
SCREAMING_SNAKE_CASE__ = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch( datasets.Metric ):
    def _info( self ) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Value("""string""" , id="""sequence""" ),
                } ) , reference_urls=[] , )
    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , """""" , x ) for x in predictions] )
                references = np.array([re.sub(s , """""" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("""""" , """""" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans("""""" , """""" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
"""simple docstring"""
from math import isclose, sqrt
def next_point( point_x: float , point_y: float , incoming_gradient: float ):
    '''simple docstring'''
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 1_00
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution( first_x_coord: float = 1.4 , first_y_coord: float = -9.6 ) -> int:
    '''simple docstring'''
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x , point_y , gradient )
        num_reflections += 1
    return num_reflections
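# Geometry note (added): the ellipse is 4x^2 + y^2 = 100, so the tangent at (x, y)
# has slope -4x/y and the normal slope y/(4x); sa and ca in next_point are
# sin(2*theta) and cos(2*theta) for the normal's angle theta, which reflect the
# incoming slope across the normal via the tangent double-angle identities.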
if __name__ == "__main__":
print(f'{solution() = }')
"""simple docstring"""
from __future__ import annotations
def encode( plain: str ) -> list[int]:
    '''simple docstring'''
    return [ord(elem ) - 96 for elem in plain]
def decode( encoded: list[int] ) -> str:
    '''simple docstring'''
    return "".join(chr(elem + 96 ) for elem in encoded )
def main() -> None:
    '''simple docstring'''
    encoded = encode(input("""-> """ ).strip().lower() )
    print("""Encoded: """ , encoded )
    print("""Decoded:""" , decode(encoded ) )
if __name__ == "__main__":
main()
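# Example (added): encode("abc") -> [1, 2, 3] and decode([1, 2, 3]) -> "abc";
# the scheme maps a..z to 1..26 via the ASCII offset of 96, which is why the
# input is lower-cased before encoding.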
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin( TFGenerationMixin ):
    # warning at import time
    warnings.warn(
        'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
        'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.' , FutureWarning , )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
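# Note (added): the `_LazyModule` pattern defers the real import until an attribute
# is first accessed, so `from transformers import BartphoTokenizer` only pulls in
# sentencepiece when the tokenizer is actually used.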
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def euclidean_gcd( a: int , b: int ) -> int:
    '''simple docstring'''
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive( a: int , b: int ) -> int:
    '''simple docstring'''
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main() -> None:
    '''simple docstring'''
print(F'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(F'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(F'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(F'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(F'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(F'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(F'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(F'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(F'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(F'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
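# Quick check (added): euclidean_gcd(48, 18) == 6 in either variant -- the loop
# runs gcd(48, 18) -> gcd(18, 12) -> gcd(12, 6) -> gcd(6, 0) = 6.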
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__( self , parent , batch_size=13 , image_size=[30, 30] , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , n_targets=8 , num_detection_tokens=10 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
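        # e.g. with the defaults above (30x30 images, patch size 2, 10 detection
        # tokens) this is (30 // 2) * (30 // 2) + 1 + 10 = 225 + 11 = 236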
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowerCAmelCase = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowerCAmelCase = []
for i in range(self.batch_size ):
lowerCAmelCase = {}
lowerCAmelCase = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=lowercase )
lowerCAmelCase = torch.rand(self.n_targets , 4 , device=lowercase )
labels.append(lowercase )
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _snake_case ( self , lowercase , lowercase , lowercase ) -> Any:
lowerCAmelCase = YolosModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _snake_case ( self , lowercase , lowercase , lowercase ) -> Tuple:
lowerCAmelCase = YolosForObjectDetection(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(pixel_values=lowercase )
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowerCAmelCase = model(pixel_values=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class YolosModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
    )
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def _snake_case ( self , lowercase , lowercase , lowercase=False ) -> Union[str, Any]:
lowerCAmelCase = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowerCAmelCase = []
for i in range(self.model_tester.batch_size ):
lowerCAmelCase = {}
lowerCAmelCase = torch.ones(
size=(self.model_tester.n_targets,) , device=lowercase , dtype=torch.long )
lowerCAmelCase = torch.ones(
self.model_tester.n_targets , 4 , device=lowercase , dtype=torch.float )
labels.append(lowercase )
lowerCAmelCase = labels
return inputs_dict
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = YolosModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def _snake_case ( self ) -> str:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Optional[Any]:
# YOLOS does not use inputs_embeds
pass
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(lowercase )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = True
# in YOLOS, the seq_len is different
lowerCAmelCase = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCAmelCase = True
lowerCAmelCase = False
lowerCAmelCase = True
lowerCAmelCase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(lowercase , lowercase ) )
lowerCAmelCase = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase = True
lowerCAmelCase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(lowercase , lowercase ) )
lowerCAmelCase = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCAmelCase = len(lowercase )
# Check attention is always last and order is fine
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(lowercase , lowercase ) )
lowerCAmelCase = 1
self.assertEqual(out_len + added_hidden_states , len(lowercase ) )
lowerCAmelCase = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _snake_case ( self ) -> Optional[Any]:
def check_hidden_states_output(lowercase , lowercase , lowercase ):
lowerCAmelCase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(lowercase , lowercase ) )
lowerCAmelCase = outputs.hidden_states
lowerCAmelCase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowercase ) , lowercase )
# YOLOS has a different seq_length
lowerCAmelCase = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*lowercase )
@slow
def _snake_case ( self ) -> Dict:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = YolosModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def _snake_case ( self ) -> List[str]:
return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None
@slow
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(lowercase )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(inputs.pixel_values )
# verify outputs
lowerCAmelCase = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , lowercase )
lowerCAmelCase = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=lowercase , )
lowerCAmelCase = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowercase , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , lowercase , atol=1e-4 ) )
# verify postprocessing
lowerCAmelCase = image_processor.post_process_object_detection(
lowercase , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowerCAmelCase = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(lowercase )
lowerCAmelCase = [75, 75, 17, 63, 17]
lowerCAmelCase = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(lowercase )
self.assertEqual(len(results["""scores"""] ) , 5 )
self.assertTrue(torch.allclose(results["""scores"""] , lowercase , atol=1e-4 ) )
self.assertSequenceEqual(results["""labels"""].tolist() , lowercase )
self.assertTrue(torch.allclose(results["""boxes"""][0, :] , lowercase ) )
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = "▁"
SCREAMING_SNAKE_CASE__ = {"vocab_file": "spiece.model"}
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
SCREAMING_SNAKE_CASE__ = {
"google/pegasus-xsum": 512,
}
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class PegasusTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self , lowercase , lowercase="<pad>" , lowercase="</s>" , lowercase="<unk>" , lowercase="<mask_2>" , lowercase="<mask_1>" , lowercase=None , lowercase=103 , lowercase = None , **lowercase , ) -> None:
lowerCAmelCase = offset
if additional_special_tokens is not None:
if not isinstance(lowercase , lowercase ):
raise TypeError(
f'additional_special_tokens should be of type {type(lowercase )}, but is'
f' {type(lowercase )}' )
lowerCAmelCase = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'<unk_{i}>' for i in range(len(lowercase ) , self.offset - 1 )
]
if len(set(lowercase ) ) != len(lowercase ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
lowerCAmelCase = additional_special_tokens_extended
else:
lowerCAmelCase = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )]
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase , unk_token=lowercase , mask_token=lowercase , pad_token=lowercase , mask_token_sent=lowercase , offset=lowercase , additional_special_tokens=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
lowerCAmelCase = mask_token_sent
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
# add special tokens to encoder dict
lowerCAmelCase = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
lowerCAmelCase = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size( self ) -> int:
        return len(self.sp_model ) + self.offset
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token )
        return sp_id + self.offset
    def _convert_id_to_token( self , index ) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset )
        return token
    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def num_special_tokens_to_add( self , pair=False ) -> int:
        return 1
    def _special_token_mask( self , seq ) -> List[int]:
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
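# A rough usage sketch (added; the checkpoint name matches the vocab map above):
#
#   tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   ids = tok("The quick brown fox.").input_ids   # ends with eos_token_id == 1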
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export( model , model_args: tuple , output_path: Path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
    '''simple docstring'''
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models( model_path: str , output_path: str , opset: int , fp16: bool = False ):
    '''simple docstring'''
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = """cuda"""
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
    else:
        device = """cpu"""
    output_path = Path(output_path )
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + """/vae""" )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25 ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
            """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
        } , opset=opset , )
    del vae_decoder
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig( PretrainedConfig ):
    model_type = 'longformer'
    def __init__( self , attention_window = 512 , sep_token_id = 2 , pad_token_id = 1 , bos_token_id = 0 , eos_token_id = 2 , vocab_size = 30_522 , hidden_size = 768 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 3_072 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 512 , type_vocab_size = 2 , initializer_range = 0.02 , layer_norm_eps = 1e-12 , onnx_export = False , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig( OnnxConfig ):
    def __init__( self , config , task = "default" , patching_specs = None ) -> None:
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""global_attention_mask""", dynamic_axis),
            ] )
    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["""pooler_output"""] = {0: """batch"""}
        return outputs
    @property
    def atol_for_validation( self ) -> float:
        return 1e-4
    @property
    def default_onnx_opset( self ) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset , 14 )
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["""global_attention_mask"""] = torch.zeros_like(inputs["""input_ids"""] )
        # make every second token global
        inputs["""global_attention_mask"""][:, ::2] = 1
        return inputs
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid( _outputs ):
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax( _outputs ):
    '''simple docstring'''
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
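# Sanity check (added): softmax(np.array([[1.0, 1.0]])) -> [[0.5, 0.5]] and
# sigmoid(np.array([0.0])) -> [0.5]; subtracting the row max before exponentiating
# keeps large logits from overflowing.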
class ClassificationFunction( ExplicitEnum ):
    SIGMOID = 'sigmoid'
    SOFTMAX = 'softmax'
    NONE = 'none'
@add_end_docstrings(
_UpperCAmelCase , r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , )
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = ClassificationFunction.NONE
def __init__( self , **lowercase ) -> Tuple:
super().__init__(**lowercase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def _snake_case ( self , lowercase=None , lowercase=None , lowercase="" , **lowercase ) -> Dict:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
lowerCAmelCase = tokenizer_kwargs
lowerCAmelCase = {}
if hasattr(self.model.config , """return_all_scores""" ) and return_all_scores is None:
lowerCAmelCase = self.model.config.return_all_scores
if isinstance(lowercase , lowercase ) or top_k is None:
lowerCAmelCase = top_k
lowerCAmelCase = False
elif return_all_scores is not None:
warnings.warn(
"""`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"""
""" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.""" , lowercase , )
if return_all_scores:
lowerCAmelCase = None
else:
lowerCAmelCase = 1
if isinstance(lowercase , lowercase ):
lowerCAmelCase = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
lowerCAmelCase = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *lowercase , **lowercase ) -> int:
lowerCAmelCase = super().__call__(*lowercase , **lowercase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
lowerCAmelCase = """top_k""" not in kwargs
if isinstance(args[0] , lowercase ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def _snake_case ( self , lowercase , **lowercase ) -> Dict[str, GenericTensor]:
lowerCAmelCase = self.framework
if isinstance(lowercase , lowercase ):
return self.tokenizer(**lowercase , return_tensors=lowercase , **lowercase )
elif isinstance(lowercase , lowercase ) and len(lowercase ) == 1 and isinstance(inputs[0] , lowercase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=lowercase , **lowercase )
elif isinstance(lowercase , lowercase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"""The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"""
""" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.""" )
return self.tokenizer(lowercase , return_tensors=lowercase , **lowercase )
def _snake_case ( self , lowercase ) -> Optional[Any]:
return self.model(**lowercase )
def _snake_case ( self , lowercase , lowercase=None , lowercase=1 , lowercase=True ) -> Optional[int]:
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
lowerCAmelCase = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
lowerCAmelCase = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , """function_to_apply""" ) and function_to_apply is None:
lowerCAmelCase = self.model.config.function_to_apply
else:
lowerCAmelCase = ClassificationFunction.NONE
lowerCAmelCase = model_outputs["""logits"""][0]
lowerCAmelCase = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
lowerCAmelCase = sigmoid(lowercase )
elif function_to_apply == ClassificationFunction.SOFTMAX:
lowerCAmelCase = softmax(lowercase )
elif function_to_apply == ClassificationFunction.NONE:
lowerCAmelCase = outputs
else:
raise ValueError(f'Unrecognized `function_to_apply` argument: {function_to_apply}' )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
lowerCAmelCase = [
{"""label""": self.model.config.idalabel[i], """score""": score.item()} for i, score in enumerate(lowercase )
]
if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] , reverse=True )
if top_k is not None:
lowerCAmelCase = dict_scores[:top_k]
return dict_scores
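# Hedged usage sketch for the text-classification pipeline defined above,
# assuming it is registered under the standard "text-classification" task;
# the checkpoint name is an assumption (any sequence-classification model works).
from transformers import pipeline

classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
print(classifier("I love this movie!"))               # legacy single-result output
print(classifier("I love this movie!", top_k=None))   # all class scores, non-legacy path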
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 42
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
@register_to_config
def __init__( self , lowercase = 3 , lowercase = 3 , lowercase = ("DownEncoderBlock2D",) , lowercase = ("UpDecoderBlock2D",) , lowercase = (64,) , lowercase = 1 , lowercase = "silu" , lowercase = 3 , lowercase = 32 , lowercase = 256 , lowercase = 32 , lowercase = None , lowercase = 0.18_215 , lowercase = "group" , ) -> Union[str, Any]:
super().__init__()
# pass init params to Encoder
lowerCAmelCase = Encoder(
in_channels=lowercase , out_channels=lowercase , down_block_types=lowercase , block_out_channels=lowercase , layers_per_block=lowercase , act_fn=lowercase , norm_num_groups=lowercase , double_z=lowercase , )
lowerCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels
        lowerCAmelCase = nn.Conv2d(lowercase , lowercase , 1 )
lowerCAmelCase = VectorQuantizer(lowercase , lowercase , beta=0.25 , remap=lowercase , sane_index_shape=lowercase )
        lowerCAmelCase = nn.Conv2d(lowercase , lowercase , 1 )
# pass init params to Decoder
lowerCAmelCase = Decoder(
in_channels=lowercase , out_channels=lowercase , up_block_types=lowercase , block_out_channels=lowercase , layers_per_block=lowercase , act_fn=lowercase , norm_num_groups=lowercase , norm_type=lowercase , )
@apply_forward_hook
def _snake_case ( self , lowercase , lowercase = True ) -> VQEncoderOutput:
lowerCAmelCase = self.encoder(lowercase )
lowerCAmelCase = self.quant_conv(lowercase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowercase )
@apply_forward_hook
def _snake_case ( self , lowercase , lowercase = False , lowercase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.quantize(lowercase )
else:
lowerCAmelCase = h
lowerCAmelCase = self.post_quant_conv(lowercase )
lowerCAmelCase = self.decoder(lowercase , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase )
def _snake_case ( self , lowercase , lowercase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
lowerCAmelCase = sample
lowerCAmelCase = self.encode(lowercase ).latents
lowerCAmelCase = self.decode(lowercase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase )
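# Minimal encode/decode round trip for the VQ autoencoder above, as a sketch.
# It assumes the class is exposed as `diffusers.VQModel` and uses the default
# constructor arguments from the signature above; the input shape is arbitrary.
import torch
from diffusers import VQModel

model = VQModel().eval()
sample = torch.randn(1, 3, 32, 32)                    # (batch, channels, height, width)
with torch.no_grad():
    latents = model.encode(sample).latents            # conv features after quant_conv
    reconstruction = model.decode(latents).sample     # quantize + decode
print(latents.shape, reconstruction.shape)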
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger()
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : LevitConfig , SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : bool = True ):
'''simple docstring'''
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
lowerCAmelCase = timm.create_model("""levit_128s""" , pretrained=SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase = timm.create_model("""levit_128""" , pretrained=SCREAMING_SNAKE_CASE )
if hidden_sizes == 1_92:
lowerCAmelCase = timm.create_model("""levit_192""" , pretrained=SCREAMING_SNAKE_CASE )
if hidden_sizes == 2_56:
lowerCAmelCase = timm.create_model("""levit_256""" , pretrained=SCREAMING_SNAKE_CASE )
if hidden_sizes == 3_84:
lowerCAmelCase = timm.create_model("""levit_384""" , pretrained=SCREAMING_SNAKE_CASE )
from_model.eval()
lowerCAmelCase = LevitForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE ).eval()
lowerCAmelCase = OrderedDict()
lowerCAmelCase = from_model.state_dict()
lowerCAmelCase = list(from_model.state_dict().keys() )
lowerCAmelCase = list(our_model.state_dict().keys() )
print(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
for i in range(len(SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase = weights[og_keys[i]]
our_model.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.randn((2, 3, 2_24, 2_24) )
lowerCAmelCase = from_model(SCREAMING_SNAKE_CASE )
lowerCAmelCase = our_model(SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), "The model logits don't match the original one."
lowerCAmelCase = name
print(SCREAMING_SNAKE_CASE )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowerCAmelCase = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : str = None , SCREAMING_SNAKE_CASE : bool = True ):
'''simple docstring'''
lowerCAmelCase = """imagenet-1k-id2label.json"""
lowerCAmelCase = 10_00
lowerCAmelCase = (1, num_labels)
lowerCAmelCase = """huggingface/label-files"""
lowerCAmelCase = num_labels
lowerCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
    lowerCAmelCase = {int(k ): v for k, v in idalabel.items()}
lowerCAmelCase = idalabel
lowerCAmelCase = {v: k for k, v in idalabel.items()}
    lowerCAmelCase = partial(LevitConfig , num_labels=num_labels , id2label=idalabel , label2id=labelaid )
lowerCAmelCase = {
"""levit-128S""": 1_28,
"""levit-128""": 1_28,
"""levit-192""": 1_92,
"""levit-256""": 2_56,
"""levit-384""": 3_84,
}
lowerCAmelCase = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE , names_to_config[model_name] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
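# Example invocation of the conversion script above, as a sketch; the file name
# follows the usual Transformers convention and is an assumption:
#
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/
#
# A converted (or hub-hosted) checkpoint then loads like any other model:
from transformers import LevitForImageClassificationWithTeacher

model = LevitForImageClassificationWithTeacher.from_pretrained("facebook/levit-128S")
print(model.config.hidden_sizes)                      # [128, 256, 384] for levit-128S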
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
SCREAMING_SNAKE_CASE__ = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = """Morse code here!"""
print(SCREAMING_SNAKE_CASE )
lowerCAmelCase = encrypt(SCREAMING_SNAKE_CASE )
print(SCREAMING_SNAKE_CASE )
lowerCAmelCase = decrypt(SCREAMING_SNAKE_CASE )
print(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
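# Quick round-trip check for the Morse helpers above; a sketch assuming the
# tables are bound as MORSE_CODE_DICT / REVERSE_DICT, as the functions reference.
message = "HELLO WORLD"
encoded = " ".join(MORSE_CODE_DICT[char] for char in message.upper())
decoded = "".join(REVERSE_DICT[char] for char in encoded.split())
assert decoded == message                             # spaces survive via the "/" token
print(encoded)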
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(SCREAMING_SNAKE_CASE ):
return ext
raise Exception(
F'Unable to determine file format from file extension {path}. '
F'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
lowerCAmelCase = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
lowerCAmelCase = PipelineDataFormat.from_str(
format=SCREAMING_SNAKE_CASE , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
class lowercase ( _UpperCAmelCase ):
def __init__( self , lowercase , lowercase ) -> Union[str, Any]:
lowerCAmelCase = nlp
lowerCAmelCase = reader
@staticmethod
def _snake_case ( lowercase ) -> Optional[int]:
lowerCAmelCase = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" )
run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" )
run_parser.add_argument("""--input""" , type=lowercase , help="""Path to the file to use for inference""" )
run_parser.add_argument("""--output""" , type=lowercase , help="""Path to the file that will be used post to write results.""" )
run_parser.add_argument("""--model""" , type=lowercase , help="""Name or path to the model to instantiate.""" )
run_parser.add_argument("""--config""" , type=lowercase , help="""Name or path to the model's config to instantiate.""" )
run_parser.add_argument(
"""--tokenizer""" , type=lowercase , help="""Name of the tokenizer to use. (default: same as the model name)""" )
run_parser.add_argument(
"""--column""" , type=lowercase , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
run_parser.add_argument(
"""--format""" , type=lowercase , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
run_parser.add_argument(
"""--device""" , type=lowercase , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" )
run_parser.set_defaults(func=lowercase )
def _snake_case ( self ) -> int:
lowerCAmelCase , lowerCAmelCase = self._nlp, []
for entry in self._reader:
lowerCAmelCase = nlp(**lowercase ) if self._reader.is_multi_columns else nlp(lowercase )
if isinstance(lowercase , lowercase ):
outputs.append(lowercase )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
lowerCAmelCase = self._reader.save_binary(lowercase )
logger.warning(f'Current pipeline requires output to be in binary format, saving at {binary_path}' )
else:
self._reader.save(lowercase )
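# Illustrative CLI invocation for the run command above, assuming it is wired
# into the `transformers-cli` entry point as in the Transformers repository:
#
#   transformers-cli run --task text-classification \
#       --input samples.csv --column text --format csv \
#       --output predictions.json --overwrite
#
# The --format flag may be omitted when the input extension is one of the
# supported formats, since try_infer_format_from_ext falls back to it.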
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
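# Sketch of what the lazy module above buys you: importing the package stays
# cheap because modeling code loads only on first attribute access. Assumes a
# Transformers release that ships FocalNet.
import transformers

config = transformers.FocalNetConfig()                # triggers the deferred import
print(config.model_type)                              # "focalnet"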
"""simple docstring"""
import string
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
lowerCAmelCase = """"""
for symbol in message:
if symbol in string.ascii_uppercase:
                lowerCAmelCase = string.ascii_uppercase.find(symbol )
lowerCAmelCase = num - key
if num < 0:
lowerCAmelCase = num + len(string.ascii_uppercase )
lowerCAmelCase = translated + string.ascii_uppercase[num]
else:
lowerCAmelCase = translated + symbol
print(F'Decryption using Key #{key}: {translated}' )
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = input("""Encrypted message: """ )
lowerCAmelCase = message.upper()
decrypt(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
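# Worked example for the brute-force decryption above: shift a plaintext by a
# known key, then the key-scanning loop recovers it. A standard-library sketch.
import string

plaintext = "ATTACK AT DAWN"
key = 3
ciphertext = "".join(
    string.ascii_uppercase[(string.ascii_uppercase.find(c) + key) % 26]
    if c in string.ascii_uppercase
    else c
    for c in plaintext
)
print(ciphertext)  # DWWDFN DW GDZQ -- the loop above prints the plaintext at Key #3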
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase ( _UpperCAmelCase ):
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=False , lowercase=False , lowercase=False , lowercase=2 , lowercase=99 , lowercase=0 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=12 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase="last" , lowercase=None , lowercase=None , ) -> int:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_lengths
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = gelu_activation
lowerCAmelCase = sinusoidal_embeddings
lowerCAmelCase = causal
lowerCAmelCase = asm
lowerCAmelCase = n_langs
lowerCAmelCase = vocab_size
lowerCAmelCase = n_special
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = summary_type
lowerCAmelCase = use_proj
lowerCAmelCase = scope
def _snake_case ( self ) -> int:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_input_lengths:
lowerCAmelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , 2 ).float()
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self ) -> List[Any]:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Any:
lowerCAmelCase = FlaubertModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , lengths=lowercase , langs=lowercase )
lowerCAmelCase = model(lowercase , langs=lowercase )
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCAmelCase = FlaubertWithLMHeadModel(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> str:
lowerCAmelCase = FlaubertForQuestionAnsweringSimple(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(lowercase , start_positions=lowercase , end_positions=lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Dict:
lowerCAmelCase = FlaubertForQuestionAnswering(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(
lowercase , start_positions=lowercase , end_positions=lowercase , cls_index=lowercase , is_impossible=lowercase , p_mask=lowercase , )
lowerCAmelCase = model(
lowercase , start_positions=lowercase , end_positions=lowercase , cls_index=lowercase , is_impossible=lowercase , )
((lowerCAmelCase) , ) = result_with_labels.to_tuple()
lowerCAmelCase = model(lowercase , start_positions=lowercase , end_positions=lowercase )
((lowerCAmelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> int:
lowerCAmelCase = FlaubertForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> int:
lowerCAmelCase = self.num_labels
lowerCAmelCase = FlaubertForTokenClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCAmelCase = self.num_choices
lowerCAmelCase = FlaubertForMultipleChoice(config=lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) = config_and_inputs
lowerCAmelCase = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _snake_case ( self , lowercase , lowercase , lowercase=False ) -> Optional[Any]:
lowerCAmelCase = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = FlaubertModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowercase , emb_dim=37 )
def _snake_case ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowercase )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowercase )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowercase )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowercase )
@slow
def _snake_case ( self ) -> Tuple:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = FlaubertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@slow
@require_torch_gpu
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowerCAmelCase = True
lowerCAmelCase = model_class(config=lowercase )
lowerCAmelCase = self._prepare_for_class(lowercase , lowercase )
lowerCAmelCase = torch.jit.trace(
lowercase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase , os.path.join(lowercase , """traced_model.pt""" ) )
lowerCAmelCase = torch.jit.load(os.path.join(lowercase , """traced_model.pt""" ) , map_location=lowercase )
loaded(inputs_dict["""input_ids"""].to(lowercase ) , inputs_dict["""attention_mask"""].to(lowercase ) )
@require_torch
class lowercase ( unittest.TestCase ):
@slow
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
lowerCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
with torch.no_grad():
lowerCAmelCase = model(lowercase )[0]
lowerCAmelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowercase )
lowerCAmelCase = torch.tensor(
[[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1e-4 ) )
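# Hedged quick-start mirroring the integration test above: load the public
# checkpoint and run one forward pass; requires network access on first use.
import torch
from transformers import FlaubertModel, FlaubertTokenizer

tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased").eval()
inputs = tokenizer("Le chat dort.", return_tensors="pt")
with torch.no_grad():
    hidden_states = model(**inputs)[0]
print(hidden_states.shape)                            # (1, seq_len, 768)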
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class lowercase :
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=False , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ) -> Tuple:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self ) -> List[Any]:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , use_stable_embedding=lowercase , )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Any:
lowerCAmelCase = OpenLlamaModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , attention_mask=lowercase )
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Dict:
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaModel(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , )
lowerCAmelCase = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , )
lowerCAmelCase = model(lowercase , attention_mask=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> int:
lowerCAmelCase = OpenLlamaForCausalLM(config=lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[str]:
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaForCausalLM(config=lowercase )
model.to(lowercase )
model.eval()
# first forward pass
lowerCAmelCase = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , use_cache=lowercase , )
lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , output_hidden_states=lowercase , )["""hidden_states"""][0]
lowerCAmelCase = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , past_key_values=lowercase , output_hidden_states=lowercase , )["""hidden_states"""][0]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-3 ) )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) = config_and_inputs
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_SCREAMING_SNAKE_CASE = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def _snake_case ( self ) -> Any:
lowerCAmelCase = OpenLlamaModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def _snake_case ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def _snake_case ( self ) -> Dict:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*lowercase )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = input_dict["""input_ids"""]
lowerCAmelCase = input_ids.ne(1 ).to(lowercase )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = OpenLlamaForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _snake_case ( self ) -> List[str]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = """single_label_classification"""
lowerCAmelCase = input_dict["""input_ids"""]
lowerCAmelCase = input_ids.ne(1 ).to(lowercase )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = OpenLlamaForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _snake_case ( self ) -> Any:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = """multi_label_classification"""
lowerCAmelCase = input_dict["""input_ids"""]
lowerCAmelCase = input_ids.ne(1 ).to(lowercase )
lowerCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase = OpenLlamaForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _snake_case ( self ) -> str:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _snake_case ( self , lowercase ) -> str:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase = OpenLlamaModel(lowercase )
original_model.to(lowercase )
original_model.eval()
lowerCAmelCase = original_model(lowercase ).last_hidden_state
lowerCAmelCase = original_model(lowercase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase = {"""type""": scaling_type, """factor""": 10.0}
lowerCAmelCase = OpenLlamaModel(lowercase )
scaled_model.to(lowercase )
scaled_model.eval()
lowerCAmelCase = scaled_model(lowercase ).last_hidden_state
lowerCAmelCase = scaled_model(lowercase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowercase , lowercase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase , lowercase , atol=1e-5 ) )
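# Sketch of the RoPE-scaling configuration exercised by the parameterized test
# above; the 10x factor matches the test. Open-Llama support varies across
# Transformers versions, so treat the exact kwargs as assumptions.
from transformers import OpenLlamaConfig

config = OpenLlamaConfig(
    vocab_size=100,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    rope_scaling={"type": "dynamic", "factor": 10.0},
)
print(config.rope_scaling)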
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ = "▁"
SCREAMING_SNAKE_CASE__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = BigBirdTokenizer
_SCREAMING_SNAKE_CASE = BigBirdTokenizerFast
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
def _snake_case ( self ) -> List[str]:
super().setUp()
lowerCAmelCase = self.tokenizer_class(lowercase , keep_accents=lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = """<s>"""
lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase )
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """[MASK]""" )
self.assertEqual(len(lowercase ) , 1_004 )
def _snake_case ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def _snake_case ( self ) -> List[str]:
if not self.test_rust_tokenizer:
return
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = """I was born in 92000, and this is falsé."""
lowerCAmelCase = tokenizer.tokenize(lowercase )
lowerCAmelCase = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
lowerCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
lowerCAmelCase = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(lowercase )
lowerCAmelCase = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _snake_case ( self ) -> int:
lowerCAmelCase = BigBirdTokenizer(lowercase , keep_accents=lowercase )
lowerCAmelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase ) , [285, 46, 10, 170, 382] , )
lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(
lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def _snake_case ( self ) -> Tuple:
return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
@slow
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = """Hello World!"""
lowerCAmelCase = [65, 18_536, 2_260, 101, 66]
self.assertListEqual(lowercase , self.big_tokenizer.encode(lowercase ) )
@slow
def _snake_case ( self ) -> int:
lowerCAmelCase = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
# fmt: off
lowerCAmelCase = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
# fmt: on
self.assertListEqual(lowercase , self.big_tokenizer.encode(lowercase ) )
@require_torch
@slow
def _snake_case ( self ) -> Tuple:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowerCAmelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCAmelCase = """ """.join(lowercase )
lowerCAmelCase = self.big_tokenizer.encode_plus(lowercase , return_tensors="""pt""" , return_token_type_ids=lowercase )
lowerCAmelCase = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=lowercase )
lowerCAmelCase = BigBirdConfig(attention_type="""original_full""" )
lowerCAmelCase = BigBirdModel(lowercase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase )
model(**lowercase )
@slow
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
lowerCAmelCase = tokenizer.decode(tokenizer("""Paris is the [MASK].""" ).input_ids )
self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""" )
@slow
def _snake_case ( self ) -> Optional[int]:
# fmt: off
lowerCAmelCase = {"""input_ids""": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
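# Minimal tokenizer usage mirroring the slow tests above; assumes network access
# to the public checkpoint.
from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
ids = tokenizer.encode("Hello World!")
print(ids)                                            # per the test above: [65, 18536, 2260, 101, 66]
print(tokenizer.decode(ids))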
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class lowercase ( _UpperCAmelCase ):
def __init__( self , *lowercase , **lowercase ) -> None:
warnings.warn(
"""The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use FlavaImageProcessor instead.""" , lowercase , )
super().__init__(*lowercase , **lowercase )
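# Migration sketch for the deprecation above: instantiate the replacement class
# directly. The checkpoint name is an assumption (any FLAVA checkpoint works).
from transformers import FlavaImageProcessor

image_processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")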
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class lowercase :
def __init__( self , lowercase , ) -> Optional[int]:
lowerCAmelCase = parent
lowerCAmelCase = 13
lowerCAmelCase = 7
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = False
lowerCAmelCase = True
lowerCAmelCase = 99
lowerCAmelCase = 32
lowerCAmelCase = 2
lowerCAmelCase = 4
lowerCAmelCase = 37
lowerCAmelCase = """gelu"""
lowerCAmelCase = 0.1
lowerCAmelCase = 0.1
lowerCAmelCase = 512
lowerCAmelCase = 16
lowerCAmelCase = 2
lowerCAmelCase = 0.02
lowerCAmelCase = 3
lowerCAmelCase = 4
lowerCAmelCase = None
def _snake_case ( self ) -> str:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
lowerCAmelCase = TFDistilBertModel(config=lowercase )
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowerCAmelCase = model(lowercase )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_distilbert_for_masked_lm ( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[int]:
        model = TFDistilBertForMaskedLM(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_distilbert_for_question_answering ( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[Any]:
        model = TFDistilBertForQuestionAnswering(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_distilbert_for_sequence_classification ( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[Any]:
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_distilbert_for_multiple_choice ( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_distilbert_for_token_classification ( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> int:
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common ( self ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp ( self ) -> Dict:
        self.model_tester = TFDistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
    def test_config ( self ) -> str:
        self.config_tester.run_common_tests()
    def test_distilbert_model ( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )
    def test_for_masked_lm ( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering ( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification ( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )
    def test_for_multiple_choice ( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )
    def test_for_token_classification ( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained ( self ) -> List[str]:
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            model = TFDistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFDistilBertModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_output_embeds_base_model ( self ) -> Any:
        model = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [0.19_261_885, -0.13_732_955, 0.4_119_799],
                    [0.22_150_156, -0.07_422_661, 0.39_037_204],
                    [0.22_756_018, -0.0_896_414, 0.3_701_467],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
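# Added note: the integration test above pins a 3x3 slice of pretrained hidden states.
# `tf.debugging.assert_near(x, y, atol=...)` raises when any element pair differs by
# more than the tolerance, e.g. (minimal sketch, assumes TensorFlow is installed):
#
#   tf.debugging.assert_near(tf.constant([1.0]), tf.constant([1.00001]), atol=1e-4)  # passes
#   tf.debugging.assert_near(tf.constant([1.0]), tf.constant([1.1]), atol=1e-4)      # raises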
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
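# Added context (illustrative): test_patching.py uses patch_submodule() to swap out
# attributes reached through the aliases above (os, renamed_os, path, renamed_path,
# join, renamed_join, and the module-level `open`), checking that patching works for
# plain imports, aliased imports, from-imports, and builtins alike.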
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class GPTSw3Tokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("""name_or_path""" )
        if name_or_path is None:
            logger.warning(
                """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
                """ if you are testing the model, this can safely be ignored""" )
            name_or_path = """None"""
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = """<|endoftext|>""" if eos_token is None else eos_token
        unk_token = """<unk>""" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = """<pad>""" if pad_token is None else pad_token
            bos_token = """<s>""" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f'[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]' )
    def __getstate__( self ) -> Optional[int]:
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ) -> str:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size ( self ) -> int:
        return len(self.sp_model )
    def preprocess_text ( self , text ) -> str:
        text = self.non_printing_characters_re.sub("""""" , text )
        # Normalize whitespaces
        text = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize("""NFC""" , text )
        return text
    def _tokenize ( self , text , **kwargs ) -> List[str]:
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id ( self , token ) -> int:
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token ( self , index ) -> str:
        return self.sp_model.IdToPiece(index )
    @staticmethod
    def clean_up_tokenization ( out_string ) -> str:
        return out_string
    def convert_tokens_to_string ( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
    def get_vocab ( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def save_vocabulary ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def encode_fast ( self , text , return_tensors = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text , str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
        return token_ids
    def decode_fast ( self , token_ids ) -> str:
        return self.sp_model.decode(token_ids )
    def _build_conversation_input_ids ( self , conversation ) -> List[int]:
        all_responses = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
        prompt = (
            f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(all_responses ) + f'{self.bos_token}Bot:'
        )
        return self.encode(text=prompt )
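# Added usage sketch (illustrative; requires network access -- the checkpoint id
# comes from PRETRAINED_VOCAB_FILES_MAP above):
if __name__ == "__main__":
    tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
    ids = tokenizer("Träd är fina")["input_ids"]
    print(tokenizer.decode(ids))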
"""simple docstring"""
def merge_sort ( collection: list ):
    '''
    Pure implementation of the merge sort algorithm in Python.
    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    >>> merge_sort([-2, -5, -45])
    [-45, -5, -2]
    '''
    def merge(left: list , right: list ) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right
        return list(_merge() )
    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
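# Worked example (added): merge_sort([5, 1, 4, 2]) recurses into merge_sort([5, 1])
# and merge_sort([4, 2]), which return [1, 5] and [2, 4]; merge() then interleaves
# the two sorted halves by repeatedly popping the smaller head: 1, 2, 4, 5.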
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
SCREAMING_SNAKE_CASE__ = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
SCREAMING_SNAKE_CASE__ = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
SCREAMING_SNAKE_CASE__ = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy ( preds , labels ):
    '''simple docstring'''
    return float((preds == labels).mean() )
def acc_and_fa ( preds , labels ):
    '''simple docstring'''
    acc = simple_accuracy(preds , labels )
    fa = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def pearson_and_spearman ( preds , labels ):
    '''simple docstring'''
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue ( datasets.Metric ):
    def _info ( self ) -> List[str]:
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def _compute ( self , predictions , references ) -> Any:
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions , references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def parse_args ( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser(
        description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
    parser.add_argument(
        """--dataset_name""" , type=str , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , )
    parser.add_argument(
        """--dataset_config""" , type=str , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
    parser.add_argument(
        """--tokenizer_name_or_path""" , type=str , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
    parser.add_argument(
        """--shard_size""" , type=int , default=10_00 , help="""Number of entries to go in a single shard.""" , )
    parser.add_argument("""--split""" , type=str , default="""train""" , choices=["""train""", """test""", """validation"""] )
    parser.add_argument(
        """--limit""" , default=None , type=int , help="""Limit the number of shards (used for debugging).""" , )
    parser.add_argument(
        """--max_length""" , type=int , default=5_12 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
        """ sequence length that is a multiple of 8.""" , )
    parser.add_argument(
        """--output_dir""" , default="""tf-tpu""" , type=str , help="""Output directory where the TFRecord shards will be saved. If the"""
        """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
        """ shards will be directly saved to a Google Cloud Storage bucket.""" , )
    args = parser.parse_args()
    return args
def tokenize_function ( tokenizer ):
    '''simple docstring'''
    def fn(examples ):
        return tokenizer(examples["""text"""] )
    return fn
def get_serialized_examples ( tokenized_data ):
    '''simple docstring'''
    records = []
    for i in range(len(tokenized_data["""input_ids"""] ) ):
        features = {
            """input_ids""": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["""input_ids"""][i] ) ),
            """attention_mask""": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["""attention_mask"""][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
    return records
def main ( args ):
    '''simple docstring'''
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F'Limiting the dataset to {args.limit} entries.' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["""text"""] )
    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=10_00 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["""input_ids"""] )
        filename = os.path.join(split_dir , F'dataset-{shard_count}-{records_containing}.tfrecord' )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("""Wrote file {} containing {} records""".format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(F'split-{args.split}-records-count.txt' , """w""" ) as f:
        print(F'Total {args.split} records: {total_records}' , file=f )
if __name__ == "__main__":
    args = parse_args()
    main(args)
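# Example invocation (added; the script filename is hypothetical, but the flags
# match parse_args above):
#
#   python prepare_tfrecord_shards.py --dataset_name wikitext \
#       --dataset_config wikitext-103-raw-v1 --split train \
#       --shard_size 1000 --max_length 512 --output_dir tf-tpu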
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class ImageGPTConfig ( PretrainedConfig ):
    model_type = 'imagegpt'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , tie_word_embeddings=False , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ) -> Any:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class ImageGPTOnnxConfig ( OnnxConfig ):
    @property
    def inputs ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
    def generate_dummy_inputs ( self , preprocessor , batch_size = 1 , seq_length = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 32 , image_height = 32 , ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(preprocessor(images=input_image , return_tensors=framework ) )
        return inputs
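# Added sketch: thanks to attribute_map above, GPT-2 style aliases resolve to the
# ImageGPT names (illustrative only):
#
#   config = ImageGPTConfig()
#   assert config.hidden_size == config.n_embd == 512
#   assert config.num_hidden_layers == config.n_layer == 24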
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig ( PretrainedConfig ):
    model_type = 'decision_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , state_dim=17 , act_dim=4 , hidden_size=128 , max_ep_len=4_096 , action_tanh=True , vocab_size=1 , n_positions=1_024 , n_layer=3 , n_head=1 , n_inner=None , activation_function="relu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ) -> Tuple:
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
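# Added sketch (illustrative): attribute_map above aliases GPT-2 style names, so
# `max_position_embeddings` reads back `n_positions`:
#
#   config = DecisionTransformerConfig()
#   assert config.max_position_embeddings == config.n_positions == 1_024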
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
SCREAMING_SNAKE_CASE__ = open # noqa: we just need to have a builtin inside this module to test it properly
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=128 , max_relative_position=32 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> int:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs ( self ) -> Tuple:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config ( self ) -> Optional[Any]:
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder ( self ) -> List[Any]:
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        model = NezhaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_model_as_decoder ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> Any:
        config.add_cross_attention = True
        model = NezhaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
        model = NezhaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_next_sequence_prediction ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
        model = NezhaForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_for_pretraining ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[Any]:
        model = NezhaForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_for_question_answering ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        model = NezhaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[int]:
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common ( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class ( self , inputs_dict , model_class , return_labels=False ) -> Tuple:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["""next_sentence_label"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp ( self ) -> Dict:
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NezhaConfig , hidden_size=37 )
    def test_config ( self ) -> Tuple:
        self.config_tester.run_common_tests()
    def test_model ( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_decoder ( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_model_as_decoder_with_default_input_mask ( self ) -> Optional[Any]:
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
    def test_for_masked_lm ( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice ( self ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction ( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining ( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_question_answering ( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification ( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification ( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained ( self ) -> Tuple:
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
    def test_torchscript_device_change ( self ) -> int:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , """bert.pt""" ) )
                loaded = torch.jit.load(os.path.join(tmp , """bert.pt""" ) , map_location=torch_device )
                loaded(inputs_dict["""input_ids"""].to(torch_device ) , inputs_dict["""attention_mask"""].to(torch_device ) )
@require_torch
class NezhaModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_inference_nezha_model ( self ) -> Tuple:
        model = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
    @slow
    def test_inference_nezha_masked_lm ( self ) -> Optional[int]:
        model = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 21_128) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
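# Added note: both integration tests pin a 3x3 logits slice against hard-coded values
# with `torch.allclose(..., atol=1e-4)`. An equivalent, slightly stricter idiom
# (sketch; torch.testing.assert_close exists in torch>=1.9):
#
#   torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, atol=1e-4, rtol=0)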
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext ( path: str ):
    '''simple docstring'''
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext ):
            return ext
    raise Exception(
        f'Unable to determine file format from file extension {path}. '
        f'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )
def run_command_factory ( args ):
    '''simple docstring'''
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    format = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
    reader = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )
class RunCommand ( BaseTransformersCLICommand ):
    def __init__( self , nlp , reader ) -> Union[str, Any]:
        self._nlp = nlp
        self._reader = reader
    @staticmethod
    def register_subcommand ( parser ) -> Optional[int]:
        run_parser = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" )
        run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" )
        run_parser.add_argument("""--input""" , type=str , help="""Path to the file to use for inference""" )
        run_parser.add_argument("""--output""" , type=str , help="""Path to the file that will be used post to write results.""" )
        run_parser.add_argument("""--model""" , type=str , help="""Name or path to the model to instantiate.""" )
        run_parser.add_argument("""--config""" , type=str , help="""Name or path to the model's config to instantiate.""" )
        run_parser.add_argument(
            """--tokenizer""" , type=str , help="""Name of the tokenizer to use. (default: same as the model name)""" )
        run_parser.add_argument(
            """--column""" , type=str , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
        run_parser.add_argument(
            """--format""" , type=str , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
        run_parser.add_argument(
            """--device""" , type=int , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
        run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" )
        run_parser.set_defaults(func=run_command_factory )
    def run ( self ) -> int:
        nlp , outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(f'Current pipeline requires output to be in binary format, saving at {binary_path}' )
        else:
            self._reader.save(outputs )
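# Added example (illustrative): the command registered above backs
# `transformers-cli run`, e.g.
#
#   transformers-cli run --task text-classification --input samples.csv \
#       --output predictions.csv --column text --format csv
#
# The flag names match the add_argument calls in register_subcommand; the file
# names here are hypothetical.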
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config ( PretrainedConfig ):
    model_type = 'speech_to_text_2'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=10_000 , decoder_layers=6 , decoder_ffn_dim=2_048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1_024 , **kwargs , ) -> Optional[Any]:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
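# Added sketch: instantiating the config above with its defaults; attribute_map
# aliases `hidden_size` to `d_model` and `num_attention_heads` to
# `decoder_attention_heads` (illustrative only):
#
#   config = Speech2Text2Config()
#   assert config.hidden_size == config.d_model == 256
#   assert config.num_attention_heads == config.decoder_attention_heads == 4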
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = False, False, False
@dataclass
class Audio :
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
    _type: str = field(default='Audio' , init=False , repr=False )
def __call__( self ) -> Union[str, Any]:
return self.pa_type
    def encode_example ( self , value ) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
        if isinstance(value , str ):
            return {"bytes": None, "path": value}
        elif isinstance(value , bytes ):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("""pcm""" ):
                # "PCM" only has raw audio bytes
                if value.get("""sampling_rate""" ) is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
                if value.get("""bytes""" ):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["""bytes"""] , dtype=np.int16 ).astype(np.float32 ) / 32_767
                else:
                    bytes_value = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.float32 ) / 32_767
                buffer = BytesIO(bytes() )
                sf.write(buffer , bytes_value , value["""sampling_rate"""] , format="""wav""" )
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("""path""" )}
        elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
        else:
            raise ValueError(
                f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
    def decode_example ( self , value , token_per_repo_id = None ) -> dict:
        if not self.decode:
            raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
        path , file = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
        if path is None and file is None:
            raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
        audio_format = xsplitext(path )[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                """Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
                """You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                """Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
                """You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("""::""" )[-1]
            try:
                repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )["""repo_id"""]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path , """rb""" , use_auth_token=use_auth_token ) as f:
                array , sampling_rate = sf.read(f )
        else:
            array , sampling_rate = sf.read(file )
        array = array.T
        if self.mono:
            array = librosa.to_mono(array )
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array , orig_sr=sampling_rate , target_sr=self.sampling_rate )
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value
        if self.decode:
            raise ValueError("""Cannot flatten a decoded Audio feature.""" )
        return {
            "bytes": Value("""binary""" ),
            "path": Value("""string""" ),
        }
    def cast_storage ( self , storage ) -> pa.StructArray:
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
            storage = pa.array([Audio().encode_example(x ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("""bytes""" ) >= 0:
                bytes_array = storage.field("""bytes""" )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index("""path""" ) >= 0:
                path_array = storage.field("""path""" )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        return array_cast(storage , self.pa_type )
def _snake_case ( self , lowercase ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(lowercase ):
with xopen(lowercase , """rb""" ) as f:
lowerCAmelCase = f.read()
return bytes_
lowerCAmelCase = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCAmelCase = pa.array(
[os.path.basename(lowercase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase , self.pa_type )
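# --- Added usage sketch (not part of the original module; assumes the public
# `datasets` API and a local "sample.wav", which is a hypothetical file name):
# casting a column to Audio makes row access return {"path", "array", "sampling_rate"}.
if __name__ == "__main__":
    from datasets import Audio, Dataset  # requires soundfile/librosa for decoding

    ds = Dataset.from_dict({"audio": ["sample.wav"]}).cast_column("audio", Audio(sampling_rate=16_000))
    print(ds[0]["audio"]["sampling_rate"])  # -> 16000 after resampling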
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
    import torch

    from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
    from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")
            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim]
            )
            out_len = len(outputs)

            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 2, len(outputs))
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim]
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : List[str]="train-batch.pt" ):
'''simple docstring'''
lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=SCREAMING_SNAKE_CASE , repo_type="""dataset""" )
lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE , map_location=SCREAMING_SNAKE_CASE )
return batch
@require_torch
@slow
class lowercase ( unittest.TestCase ):
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(lowercase )
lowerCAmelCase = prepare_batch()
with torch.no_grad():
lowerCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
lowerCAmelCase = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , lowercase )
lowerCAmelCase = torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=lowercase )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowercase , atol=lowercase ) )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(lowercase )
lowerCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
lowerCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
lowerCAmelCase = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , lowercase )
lowerCAmelCase = torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=lowercase )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowercase , atol=lowercase ) )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(lowercase )
lowerCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
lowerCAmelCase = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
lowerCAmelCase = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , lowercase )
lowerCAmelCase = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=lowercase )
lowerCAmelCase = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowercase , rtol=1e-1 ) )
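# --- Added usage sketch (hedged: mirrors `test_seq_to_seq_generation` above, needs
# network access to the `huggingface/autoformer-tourism-monthly` checkpoint and the
# dataset batch fixture; the function name is illustrative, not part of the suite) ---
def demo_autoformer_point_forecast():
    model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
    batch = prepare_batch("val-batch.pt")
    with torch.no_grad():
        outputs = model.generate(
            static_categorical_features=batch["static_categorical_features"],
            past_time_features=batch["past_time_features"],
            past_values=batch["past_values"],
            future_time_features=batch["future_time_features"],
            past_observed_mask=batch["past_observed_mask"],
        )
    # a point forecast is the mean over the sampled trajectories
    return outputs.sequences.mean(dim=1)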
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
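# --- Added sketch of the split performed above (toy sizes, not GLPN's real dims;
# the helper name is illustrative): a fused key/value projection stacks K on top of
# V, so a (2 * hidden, hidden) matrix splits into two (hidden, hidden) matrices.
def _demo_split_fused_kv(hidden_size: int = 4):
    kv_weight = torch.randn(2 * hidden_size, hidden_size)
    key_weight = kv_weight[:hidden_size, :]
    value_weight = kv_weight[hidden_size:, :]
    return key_weight, value_weight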
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
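# Added example invocation (script name and checkpoint path are illustrative, not
# shipped files):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti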
"""simple docstring"""
def optimal_merge_pattern(files: list) -> float:
    """Return the minimum total cost of merging all the given files into one file."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
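# Added sanity check (a toy example, not part of the original module): merging
# [2, 3, 4] combines 2+3 first (cost 5), then 5+4 (cost 9), for a total of 14.
if __name__ == "__main__":
    assert optimal_merge_pattern([2, 3, 4]) == 14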
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
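# Added sanity check using only the default values from the signature above: the hop
# length is prod([8, 5, 4, 2]) = 320 samples, so 24 kHz audio yields
# ceil(24000 / 320) = 75 frames per second.
if __name__ == "__main__":
    assert EncodecConfig().frame_rate == 75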
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)


class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())
        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)
        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)
    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False
    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    main(args)
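# Added example invocation (hedged: paths are illustrative; flags besides the ones
# defined above come from `add_generic_args`/`BaseTransformer` in lightning_base and
# are assumed to exist there):
#   python finetune.py --data_dir ./cnn_dm --output_dir ./sum_ckpts \
#       --model_name_or_path t5-small --do_train --do_predict \
#       --max_source_length 1024 --max_target_length 56 --val_metric rouge2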
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # `task` is kept in the asdict output so the template survives JSON round-trips
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
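# Illustrative usage (the "content" column name is hypothetical):
# dataset.prepare_for_task(LanguageModeling(text_column="content")) uses the
# column_mapping above to rename the "content" column to "text".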
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from provided command line arguments.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"""Using serve command requires FastAPI and uvicorn. """
"""Please install transformers with [serving]: pip install \"transformers[serving]\"."""
"""Or install FastAPI and uvicorn separately.""" )
else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
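# Illustrative request once the server is running (assumes the default host/port above):
#   curl -X POST http://localhost:8888/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"text_input": "Hello world", "return_ids": true}'
# With embed=True, the JSON body keys must match the parameter names of the endpoint.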
"""simple docstring"""
import re
import string
import numpy as np
import datasets
_DESCRIPTION = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
_CITATION = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
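# Worked example: with predictions=["cat?", "theater"] and references=["the cat", "theater"],
# only the second pair matches exactly, so compute() returns {"exact_match": 50.0}.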
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]
    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
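# Usage sketch: PipelineTool.__call__ chains the encode -> forward -> decode methods above,
# so `TextSummarizationTool()("Some long English text ...")` returns the decoded summary string.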
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its position in the alphabet (a=1, ..., z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of encode: map each position back to its lowercase letter."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
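# Example: encode("abc") == [1, 2, 3] and decode([1, 2, 3]) == "abc".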
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility: write a text-readable version of one batch to output_dir."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch
    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
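    # Design note for _step above: with label_smoothing > 0 the loss is computed from
    # log-probabilities, since label_smoothed_nll_loss mixes the target NLL with a uniform
    # smoothing term; with label_smoothing == 0 it reduces to plain cross-entropy.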
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)
    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)
    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics
    def test_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def test_epoch_end(self, outputs) -> Dict:
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum of the first ``n``
    natural numbers and the sum of their squares.
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
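# Worked example for n = 10: the square of the sum is 55**2 = 3025 and the sum of
# the squares is 385, so solution(10) returns 3025 - 385 = 2640.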
if __name__ == "__main__":
print(f'{solution() = }')
"""simple docstring"""
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm: gcd(a, b) = gcd(b, a mod b)."""
    while b:
        a, b = b, a % b
    return a
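# A quick trace of euclidean_gcd(48, 18): (48, 18) -> (18, 12) -> (12, 6) -> (6, 0), so the gcd is 6.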
def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive formulation of the same algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")

    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
if __name__ == "__main__":
main()
"""simple docstring"""
def is_unique(input_str: str) -> bool:
    """Return True if all characters in ``input_str`` are distinct, using a bitmap of seen code points."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
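# Example: is_unique("abc") is True, while is_unique("aba") is False because the bit
# for "a" (ord 97) is already set when the second "a" is seen.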
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
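    # Example of the id offset: with the default offset of 103, sentencepiece piece id 0 maps to
    # tokenizer id 103, because ids 0-102 are reserved for pad/eos/mask/<unk_x> special tokens.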
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token
    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS"""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is [eos] or [pad] else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence or a pair of sequences by adding eos to the end."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
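# Example: for a dummy sequence of length 8, the global_attention_mask built above is
# [1, 0, 1, 0, 1, 0, 1, 0] for every batch row, i.e. every second token attends globally.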
"""simple docstring"""
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    """Encodes ``data`` according to RFC4648."""
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decodes ``encoded_data`` according to RFC4648."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)
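# Round-trip sanity check (mirrors the behaviour of the standard base64 module):
# base64_encode(b"Hello") == b"SGVsbG8=" and base64_decode("SGVsbG8=") == b"Hello".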
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of VQModel encoding method."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int, ...] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        num_vq_embeddings: int = 32,
        sample_size: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
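# Minimal usage sketch (illustrative shapes): a (B, 3, H, W) image batch passed through
# VQModel.forward is encoded, snapped to the nearest codebook entries by the quantizer,
# and decoded back to a (B, 3, H, W) reconstruction.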
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
"""simple docstring"""
# fmt: off
MORSE_CODE_DICT = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
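# Example: encrypt("SOS") == "... --- ..." and decrypt("... --- ...") == "SOS".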
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
SCREAMING_SNAKE_CASE__ = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
SCREAMING_SNAKE_CASE__ = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, sys_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
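# Example: for a CoNLL line such as
#   "bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)"
# line.split()[5] is "(NP*)", the parse bit; a bare "-" in that column means the
# key file carries no gold parse annotation, so 'min_span' cannot be used.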
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
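# With this pattern the package import stays cheap: the torch-backed module is only
# imported when one of its attributes is first accessed. A minimal usage sketch:
#
#   from transformers.models import focalnet   # no torch-dependent import yet
#   model_cls = focalnet.FocalNetModel          # _LazyModule resolves the import here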
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
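    # Note: JAX dispatches work asynchronously, so the jitted call above returns
    # before the computation finishes; block_until_ready() forces completion so the
    # compiled forward pass is actually exercised by the test.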
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlaubertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: bool = True,
        normalize_vars: bool = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x
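    # Quick sanity check for utterance_cmvn (a minimal sketch with made-up shapes):
    #
    #   x = np.random.randn(100, 80).astype(np.float32)   # (frames, mel bins)
    #   y = Speech2TextFeatureExtractor.utterance_cmvn(x, input_length=100)
    #   assert np.allclose(y.mean(axis=0), 0.0, atol=1e-4)
    #   assert np.allclose(y.std(axis=0), 1.0, atol=1e-2)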
    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        sampling_rate=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
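# Minimal end-to-end usage sketch (assumes torchaudio is installed and a 16 kHz mono
# waveform; the variable names below are illustrative only):
#
#   fe = Speech2TextFeatureExtractor()
#   speech = np.random.randn(16_000).astype(np.float32)   # ~1 second of audio
#   inputs = fe(speech, sampling_rate=16_000, return_tensors="np")
#   # inputs["input_features"] has shape (1, num_frames, 80)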
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ = "▁"
SCREAMING_SNAKE_CASE__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1_004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
lowerCAmelCase = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
        # fmt: on
        original_tokenizer_encodings = lowerCAmelCase  # the token-id list above
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCAmelCase = {"""input_ids""": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = lowerCAmelCase  # the encoding dict literal above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
"""simple docstring"""
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        # pick two random positions and swap them
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
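# Note: the loop above swaps two uniformly random positions. This variant appears in
# some references, but it is not the canonical Fisher-Yates shuffle and does not
# produce a perfectly uniform distribution over permutations. A minimal sketch of
# the canonical algorithm:
#
#   def fisher_yates_canonical(data: list) -> list:
#       for i in range(len(data) - 1, 0, -1):
#           j = random.randint(0, i)
#           data[i], data[j] = data[j], data[i]
#       return data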
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = [0, 1, 2, 3, 4, 5, 6, 7]
SCREAMING_SNAKE_CASE__ = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
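    # Note: TF transformers models accept inputs either as a dict of named tensors or
    # as a positional list, which is why create_and_check_distilbert_model above
    # calls the model both ways.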
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
"""simple docstring"""
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
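# A slightly richer variant (a sketch using the same torch.cuda API):
#
#   for i in range(torch.cuda.device_count()):
#       print(i, torch.cuda.get_device_name(i))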
if __name__ == "__main__":
main()
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2_048,
    "AI-Sweden/gpt-sw3-350m": 2_048,
    "AI-Sweden/gpt-sw3-1.6b": 2_048,
    "AI-Sweden/gpt-sw3-6.7b": 2_048,
    "AI-Sweden/gpt-sw3-20b": 2_048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8_203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to remove the default clean-up."""
        return out_string
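    # Example of the preprocessing above: control characters are removed by
    # non_printing_characters_re, so preprocess_text("a\x00b") returns "ab"
    # (chr(0) falls inside the compiled character class).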
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def encode_fast( self , text , return_tensors = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text , str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
        return token_ids
    def decode_fast( self , token_ids ) -> str:
        return self.sp_model.decode(token_ids )
    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        all_responses = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
        prompt = (
            f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(all_responses ) + f'{self.bos_token}Bot:'
        )
        return self.encode(text=prompt )
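# A self-contained sketch of the preprocessing pipeline implemented above
# (non-printing-character removal, whitespace normalization, NFC
# normalization). The regex mirrors the table built in __init__; using
# str.isspace() is an approximation of the tokenizer's fixed whitespace set.
import re
import unicodedata

_NON_PRINTING_SKETCH = re.compile(
    f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8_203]))}]"
)


def _preprocess_text_sketch(text: str) -> str:
    text = _NON_PRINTING_SKETCH.sub("", text)                     # drop control characters
    text = "".join(c if not c.isspace() else " " for c in text)   # map exotic spaces to " "
    return unicodedata.normalize("NFC", text)                     # NFC Unicode normalization


# e.g. a soft hyphen is stripped and a thin space becomes a plain space:
# _preprocess_text_sketch("co\u00adop\u2009mode") == "coop mode"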
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer ( BaseTransformer ):
    mode = 'sequence-classification'
def __init__( self , lowercase ) -> Optional[Any]:
if type(lowercase ) == dict:
lowerCAmelCase = Namespace(**lowercase )
lowerCAmelCase = glue_output_modes[hparams.task]
lowerCAmelCase = glue_tasks_num_labels[hparams.task]
super().__init__(lowercase , lowercase , self.mode )
def _snake_case ( self , **lowercase ) -> List[str]:
return self.model(**lowercase )
def _snake_case ( self , lowercase , lowercase ) -> List[str]:
lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowerCAmelCase = self(**lowercase )
lowerCAmelCase = outputs[0]
lowerCAmelCase = self.trainer.lr_schedulers[0]["""scheduler"""]
lowerCAmelCase = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _snake_case ( self ) -> Dict:
lowerCAmelCase = self.hparams
lowerCAmelCase = processors[args.task]()
lowerCAmelCase = processor.get_labels()
for mode in ["train", "dev"]:
lowerCAmelCase = self._feature_file(lowercase )
if os.path.exists(lowercase ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , lowercase )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
lowerCAmelCase = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
lowerCAmelCase = convert_examples_to_features(
lowercase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , lowercase )
torch.save(lowercase , lowercase )
def _snake_case ( self , lowercase , lowercase , lowercase = False ) -> DataLoader:
lowerCAmelCase = """dev""" if mode == """test""" else mode
lowerCAmelCase = self._feature_file(lowercase )
logger.info("""Loading features from cached file %s""" , lowercase )
lowerCAmelCase = torch.load(lowercase )
lowerCAmelCase = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowerCAmelCase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
lowerCAmelCase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(lowercase , lowercase , lowercase , lowercase ) , batch_size=lowercase , shuffle=lowercase , )
def _snake_case ( self , lowercase , lowercase ) -> str:
lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowerCAmelCase = self(**lowercase )
lowerCAmelCase , lowerCAmelCase = outputs[:2]
lowerCAmelCase = logits.detach().cpu().numpy()
lowerCAmelCase = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _snake_case ( self , lowercase ) -> tuple:
lowerCAmelCase = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
lowerCAmelCase = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase = np.argmax(lowercase , axis=1 )
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase = np.squeeze(lowercase )
lowerCAmelCase = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0] )]
lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0] )]
lowerCAmelCase = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , lowercase , lowercase )}
lowerCAmelCase = dict(results.items() )
lowerCAmelCase = results
return ret, preds_list, out_label_list
def _snake_case ( self , lowercase ) -> dict:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(lowercase )
lowerCAmelCase = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _snake_case ( self , lowercase ) -> dict:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(lowercase )
lowerCAmelCase = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _snake_case ( lowercase , lowercase ) -> Optional[Any]:
BaseTransformer.add_model_specific_args(lowercase , lowercase )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=lowercase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=lowercase , required=lowercase , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=lowercase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
def main ( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            """./results""" , F'{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}' , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
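# Hedged usage sketch for the trainer above. The exact flag names come from
# add_generic_args/add_model_specific_args and may differ across versions, so
# treat this invocation as illustrative only:
#
#   python run_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --max_seq_length 128 \
#       --gpus 1 --do_train --do_predict
#
# GLUETransformer.__init__ also accepts a plain dict and converts it with
# Namespace(**hparams), as this quick check shows:
def _example_hparams_namespace():
    from argparse import Namespace
    hparams = Namespace(**{"task": "mrpc", "max_seq_length": 128})
    assert hparams.task == "mrpc"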
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
SCREAMING_SNAKE_CASE__ = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
SCREAMING_SNAKE_CASE__ = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
SCREAMING_SNAKE_CASE__ = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy ( preds , labels ):
    '''simple docstring'''
    return float((preds == labels).mean() )
def acc_and_f1 ( preds , labels ):
    '''simple docstring'''
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman ( preds , labels ):
    '''simple docstring'''
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue ( datasets.Metric ):
    def _info( self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def _compute( self , predictions , references ) -> Any:
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions , references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_nllb_moe"""] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
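# The module above follows the standard transformers lazy-import layout:
# _import_structure only names the exported symbols, and _LazyModule resolves
# them on first attribute access. A stripped-down illustration of the idea
# (a sketch, not the real _LazyModule from transformers.utils):
import importlib


class _LazyNamespaceSketch:
    def __init__(self, package, import_structure):
        self._package = package
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            name: module for module, names in import_structure.items() for name in names
        }

    def __getattr__(self, name):
        # import the owning submodule only when the attribute is first touched
        module = importlib.import_module(f".{self._name_to_module[name]}", self._package)
        return getattr(module, name)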
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class ImageGPTConfig ( PretrainedConfig ):
    model_type = 'imagegpt'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , tie_word_embeddings=False , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ) -> Any:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class ImageGPTOnnxConfig ( OnnxConfig ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
    def generate_dummy_inputs( self , preprocessor , batch_size = 1 , seq_length = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 32 , image_height = 32 , ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(preprocessor(images=input_image , return_tensors=framework ) )
        return inputs
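def _example_config_roundtrip():
    # Hedged usage sketch (not called anywhere): with the defaults above,
    # vocab_size is 512 + 1 and the attribute_map aliases hidden_size to n_embd.
    cfg = ImageGPTConfig()
    assert cfg.vocab_size == 513 and cfg.n_positions == 1024
    assert cfg.hidden_size == cfg.n_embd == 512
    return cfg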
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if "cls_token" in name:
lowerCAmelCase = name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
lowerCAmelCase = name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
lowerCAmelCase = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
lowerCAmelCase = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
lowerCAmelCase = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowerCAmelCase = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
lowerCAmelCase = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
lowerCAmelCase = name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
lowerCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowerCAmelCase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowerCAmelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowerCAmelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowerCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
lowerCAmelCase = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
lowerCAmelCase = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
lowerCAmelCase = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
lowerCAmelCase = name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
lowerCAmelCase = name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase = orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if "qkv" in key:
lowerCAmelCase = key.split(""".""" )
lowerCAmelCase = int(key_split[1] )
if "decoder_blocks" in key:
lowerCAmelCase = config.decoder_hidden_size
lowerCAmelCase = """decoder.decoder_layers."""
if "weight" in key:
lowerCAmelCase = val[:dim, :]
lowerCAmelCase = val[dim : dim * 2, :]
lowerCAmelCase = val[-dim:, :]
elif "bias" in key:
lowerCAmelCase = val[:dim]
lowerCAmelCase = val[dim : dim * 2]
lowerCAmelCase = val[-dim:]
else:
lowerCAmelCase = config.hidden_size
lowerCAmelCase = """vit.encoder.layer."""
if "weight" in key:
lowerCAmelCase = val[:dim, :]
lowerCAmelCase = val[dim : dim * 2, :]
lowerCAmelCase = val[-dim:, :]
elif "bias" in key:
lowerCAmelCase = val[:dim]
lowerCAmelCase = val[dim : dim * 2]
lowerCAmelCase = val[-dim:]
else:
lowerCAmelCase = val
return orig_state_dict
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase = ViTMAEConfig()
if "large" in checkpoint_url:
lowerCAmelCase = 10_24
lowerCAmelCase = 40_96
lowerCAmelCase = 24
lowerCAmelCase = 16
elif "huge" in checkpoint_url:
lowerCAmelCase = 14
lowerCAmelCase = 12_80
lowerCAmelCase = 51_20
lowerCAmelCase = 32
lowerCAmelCase = 16
lowerCAmelCase = ViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location="""cpu""" )["""model"""]
lowerCAmelCase = ViTMAEImageProcessor(size=config.image_size )
lowerCAmelCase = convert_state_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
lowerCAmelCase = ViTMAEImageProcessor(size=config.image_size )
lowerCAmelCase = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
lowerCAmelCase = model(**SCREAMING_SNAKE_CASE )
lowerCAmelCase = outputs.logits
if "large" in checkpoint_url:
lowerCAmelCase = torch.tensor(
[[-0.73_09, -0.71_28, -1.01_69], [-1.01_61, -0.90_58, -1.18_78], [-1.04_78, -0.94_11, -1.19_11]] )
elif "huge" in checkpoint_url:
lowerCAmelCase = torch.tensor(
[[-1.15_99, -0.91_99, -1.22_21], [-1.19_52, -0.92_69, -1.23_07], [-1.21_43, -0.93_37, -1.22_62]] )
else:
lowerCAmelCase = torch.tensor(
[[-0.91_92, -0.84_81, -1.12_59], [-1.13_49, -1.00_34, -1.25_99], [-1.17_57, -1.04_29, -1.27_26]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
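def _example_qkv_split():
    # Hedged sketch (not called by the script): the state-dict surgery above
    # slices a fused (3*dim, dim) qkv projection into query/key/value blocks.
    # This checks that the three slices tile the fused matrix exactly.
    dim = 4
    qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert torch.equal(torch.cat([q, k, v]), qkv)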
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
SCREAMING_SNAKE_CASE__ = open # noqa: we just need to have a builtin inside this module to test it properly
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
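# Every try/except block above is the same optional-dependency guard: the
# package imports cleanly even when a backend is missing, and only the guarded
# names get replaced by dummies that raise on use. A standalone sketch of the
# pattern (illustrative names, not diffusers API):
def _optional_import_sketch(module_name):
    import importlib
    try:
        return importlib.import_module(module_name)
    except ImportError:
        # the real library instead installs dummy objects that raise a helpful
        # OptionalDependencyNotAvailable-style error when instantiated
        return None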
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
def try_infer_format_from_ext ( path : str ):
    '''simple docstring'''
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext ):
            return ext
    raise Exception(
        F'Unable to determine file format from file extension {path}. '
        F'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )
def run_command_factory ( args ):
    '''simple docstring'''
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    format = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
    reader = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )
class RunCommand ( BaseTransformersCLICommand ):
    def __init__( self , nlp , reader ) -> Union[str, Any]:
        self._nlp = nlp
        self._reader = reader
@staticmethod
    def register_subcommand( parser ) -> Optional[int]:
        run_parser = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" )
        run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" )
        run_parser.add_argument("""--input""" , type=str , help="""Path to the file to use for inference""" )
        run_parser.add_argument("""--output""" , type=str , help="""Path to the file that will be used post to write results.""" )
        run_parser.add_argument("""--model""" , type=str , help="""Name or path to the model to instantiate.""" )
        run_parser.add_argument("""--config""" , type=str , help="""Name or path to the model's config to instantiate.""" )
        run_parser.add_argument(
            """--tokenizer""" , type=str , help="""Name of the tokenizer to use. (default: same as the model name)""" )
        run_parser.add_argument(
            """--column""" , type=str , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
        run_parser.add_argument(
            """--format""" , type=str , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
        run_parser.add_argument(
            """--device""" , type=int , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
        run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" )
        run_parser.set_defaults(func=run_command_factory )
    def run( self ):
        nlp , outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(f'Current pipeline requires output to be in binary format, saving at {binary_path}' )
        else:
            self._reader.save(outputs )
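# Hedged CLI sketch for the command above (flag names per register_subcommand;
# exact availability depends on the installed transformers version):
#
#   transformers-cli run --task text-classification \
#       --input samples.csv --column text --format infer --output out.csv
#
def _example_format_inference():
    # Not called by the CLI: exercises try_infer_format_from_ext's rule,
    # assuming SUPPORTED_FORMATS contains "csv".
    assert try_infer_format_from_ext("samples.csv") == "csv"
    assert try_infer_format_from_ext("") == "pipe"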
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase ( metaclass=DummyObject ):
    _backends = ['speech']
    def __init__( self , *args , **kwargs ) -> str:
        requires_backends(self , ["""speech"""] )
class lowercase ( metaclass=DummyObject ):
    _backends = ['speech']
    def __init__( self , *args , **kwargs ) -> Any:
        requires_backends(self , ["""speech"""] )
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = False, False, False
@dataclass
class Audio :
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
    _type: str = field(default='Audio' , init=False , repr=False )
def __call__( self ) -> Union[str, Any]:
return self.pa_type
    def encode_example( self , value ) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
        if isinstance(value , str ):
            return {"bytes": None, "path": value}
        elif isinstance(value , bytes ):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("""pcm""" ):
                # "PCM" only has raw audio bytes
                if value.get("""sampling_rate""" ) is None:
                    # converting raw PCM bytes to WAV bytes requires knowing the sampling rate
                    raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
                if value.get("""bytes""" ):
                    # if the PCM bytes are already in memory, use them directly instead of re-reading the file
                    bytes_value = np.frombuffer(value["""bytes"""] , dtype=np.int16 ).astype(np.float32 ) / 32_767
                else:
                    bytes_value = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.float32 ) / 32_767
                buffer = BytesIO(bytes() )
                sf.write(buffer , bytes_value , value["""sampling_rate"""] , format="""wav""" )
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("""path""" )}
        elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
        else:
            raise ValueError(
                f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def _snake_case ( self , lowercase , lowercase = None ) -> dict:
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
lowerCAmelCase , lowerCAmelCase = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
lowerCAmelCase = xsplitext(lowercase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
lowerCAmelCase = token_per_repo_id or {}
lowerCAmelCase = path.split("""::""" )[-1]
try:
lowerCAmelCase = string_to_dict(lowercase , config.HUB_DATASETS_URL )["""repo_id"""]
lowerCAmelCase = token_per_repo_id[repo_id]
except (ValueError, KeyError):
lowerCAmelCase = None
with xopen(lowercase , """rb""" , use_auth_token=lowercase ) as f:
lowerCAmelCase , lowerCAmelCase = sf.read(lowercase )
else:
lowerCAmelCase , lowerCAmelCase = sf.read(lowercase )
lowerCAmelCase = array.T
if self.mono:
lowerCAmelCase = librosa.to_mono(lowercase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
lowerCAmelCase = librosa.resample(lowercase , orig_sr=lowercase , target_sr=self.sampling_rate )
lowerCAmelCase = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _snake_case ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def _snake_case ( self , lowercase ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
lowerCAmelCase = pa.array([None] * len(lowercase ) , type=pa.binary() )
lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCAmelCase = pa.array([None] * len(lowercase ) , type=pa.string() )
lowerCAmelCase = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
lowerCAmelCase = pa.array([Audio().encode_example(lowercase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
lowerCAmelCase = storage.field("""bytes""" )
else:
lowerCAmelCase = pa.array([None] * len(lowercase ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
lowerCAmelCase = storage.field("""path""" )
else:
lowerCAmelCase = pa.array([None] * len(lowercase ) , type=pa.string() )
lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(lowercase , self.pa_type )
def _snake_case ( self , lowercase ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(lowercase ):
with xopen(lowercase , """rb""" ) as f:
lowerCAmelCase = f.read()
return bytes_
lowerCAmelCase = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCAmelCase = pa.array(
[os.path.basename(lowercase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase , self.pa_type )
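def _example_encode_sine_wave():
    # Hedged usage sketch (requires soundfile installed, not called here):
    # pushing a raw array through encode_example above exercises the "array"
    # branch and yields WAV bytes in the {"bytes", "path"} struct this feature
    # stores.
    sampling_rate = 16_000
    wave = np.sin(2 * np.pi * 440 * np.arange(sampling_rate) / sampling_rate).astype(np.float32)
    encoded = Audio().encode_example({"array": wave, "sampling_rate": sampling_rate})
    assert encoded["path"] is None and encoded["bytes"][:4] == b"RIFF"  # WAV container
    return encoded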
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results ( test_results ):
    '''simple docstring'''
    expressions = test_results.split(""" """ )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
def extract_first_line_failure ( failures_short_lines ):
    '''simple docstring'''
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("""\n""" ):
        if re.search(R"""_ \[doctest\]""" , line ):
            in_error = True
            file = line.split(""" """ )[2]
        elif in_error and not line.split(""" """ )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message :
    def __init__( self , title , doc_test_results ) -> List[str]:
        self.title = title
        self._time_spent = doc_test_results["""time_spent"""].split(""",""" )[0]
        self.n_success = doc_test_results["""success"""]
        self.n_failures = doc_test_results["""failures"""]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
def _snake_case ( self ) -> str:
lowerCAmelCase = [self._time_spent]
lowerCAmelCase = 0
for time in time_spent:
lowerCAmelCase = time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(lowercase ) == 1:
lowerCAmelCase = [0, 0, time_parts[0]]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3_600 + minutes * 60 + seconds
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
return f'{int(lowercase )}h{int(lowercase )}m{int(lowercase )}s'
@property
def _snake_case ( self ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _snake_case ( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _snake_case ( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _snake_case ( self ) -> Dict:
lowerCAmelCase = 40
lowerCAmelCase = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(lowercase , lowercase )}
lowerCAmelCase = """"""
for category, failures in category_failures.items():
if len(lowercase ) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(lowercase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def _snake_case ( self ) -> str:
lowerCAmelCase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(lowercase )
@staticmethod
def _snake_case ( ) -> int:
lowerCAmelCase = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(lowercase )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=lowercase , )
def _snake_case ( self ) -> Union[str, Any]:
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
lowerCAmelCase = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else """All tests passed."""
lowerCAmelCase = client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=lowercase , )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase ) -> int:
lowerCAmelCase = """"""
for key, value in failures.items():
lowerCAmelCase = value[:200] + """ [Truncated]""" if len(lowercase ) > 250 else value
failures_text += f'*{key}*\n_{value}_\n\n'
lowerCAmelCase = job_name
lowerCAmelCase = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
lowerCAmelCase = {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _snake_case ( self ) -> List[str]:
if self.thread_ts is None:
raise ValueError("""Can only post reply if a post has been made.""" )
lowerCAmelCase = self.doc_test_results.pop("""job_link""" )
self.doc_test_results.pop("""failures""" )
self.doc_test_results.pop("""success""" )
self.doc_test_results.pop("""time_spent""" )
lowerCAmelCase = sorted(self.doc_test_results.items() , key=lambda lowercase : t[0] )
for job, job_result in sorted_dict:
if len(job_result["""failures"""] ):
lowerCAmelCase = f'*Num failures* :{len(job_result["failed"] )} \n'
lowerCAmelCase = job_result["""failures"""]
lowerCAmelCase = self.get_reply_blocks(lowercase , lowercase , lowercase , text=lowercase )
print("""Sending the following reply""" )
print(json.dumps({"""blocks""": blocks} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=f'Results for {job}' , blocks=lowercase , thread_ts=self.thread_ts["""ts"""] , )
time.sleep(1 )
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = os.environ["""GITHUB_RUN_ID"""]
lowerCAmelCase = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE ).json()
lowerCAmelCase = {}
try:
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
lowerCAmelCase = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(SCREAMING_SNAKE_CASE ):
lowerCAmelCase = requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return jobs
except Exception as e:
print("""Unknown error, could not fetch links.""" , SCREAMING_SNAKE_CASE )
return {}
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase = {}
if os.path.exists(SCREAMING_SNAKE_CASE ):
lowerCAmelCase = os.listdir(SCREAMING_SNAKE_CASE )
for file in files:
try:
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , encoding="""utf-8""" ) as f:
lowerCAmelCase = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}.' ) from e
return _artifact
def UpperCAmelCase__ ( ):
'''simple docstring'''
class lowercase :
def __init__( self , lowercase ) -> Optional[int]:
lowerCAmelCase = name
lowerCAmelCase = []
def __str__( self ) -> Union[str, Any]:
return self.name
def _snake_case ( self , lowercase ) -> Optional[Any]:
self.paths.append({"""name""": self.name, """path""": path} )
lowerCAmelCase = {}
lowerCAmelCase = filter(os.path.isdir , os.listdir() )
for directory in directories:
lowerCAmelCase = directory
if artifact_name not in _available_artifacts:
lowerCAmelCase = Artifact(SCREAMING_SNAKE_CASE )
_available_artifacts[artifact_name].add_path(SCREAMING_SNAKE_CASE )
return _available_artifacts
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = get_job_links()
SCREAMING_SNAKE_CASE__ = retrieve_available_artifacts()
SCREAMING_SNAKE_CASE__ = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
SCREAMING_SNAKE_CASE__ = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
SCREAMING_SNAKE_CASE__ = github_actions_job_links.get("run_doctests")
SCREAMING_SNAKE_CASE__ = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
SCREAMING_SNAKE_CASE__ = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = handle_test_results(artifact["stats"])
SCREAMING_SNAKE_CASE__ = failed
SCREAMING_SNAKE_CASE__ = success
SCREAMING_SNAKE_CASE__ = time_spent[1:-1] + ", "
SCREAMING_SNAKE_CASE__ = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
SCREAMING_SNAKE_CASE__ = line.replace("FAILED ", "")
SCREAMING_SNAKE_CASE__ = line.split()[0].replace("\n", "")
if "::" in line:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = line.split("::")
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
SCREAMING_SNAKE_CASE__ = docs[file_regex]
doc_test_results[category]["failed"].append(test)
SCREAMING_SNAKE_CASE__ = all_failures[test] if test in all_failures else "N/A"
SCREAMING_SNAKE_CASE__ = failure
break
SCREAMING_SNAKE_CASE__ = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
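def _example_stats_parsing():
    # Hedged sketch (not executed by the CI job): the pytest summary line is
    # whitespace-split and the counts sit one token before the
    # "failed"/"passed" markers.
    failed, success, time_spent = handle_test_results("= 2 failed, 10 passed in 31.21s =")
    assert (failed, success, time_spent) == (2, 10, "31.21s")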
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    """Rename the keys of the original GLPN checkpoint to the HuggingFace naming scheme."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx) - 1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx) - 1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx) - 1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
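# A quick, illustrative check of the renaming logic above (the key below is made up
# for demonstration and does not come from a real checkpoint):
_toy = OrderedDict({"module.encoder.patch_embed1.proj.weight": 0})
assert "glpn.encoder.patch_embeddings.0.proj.weight" in rename_keys(_toy)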
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
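# Why the slicing above works: the original checkpoint fuses the key and value
# projections into one matrix of shape (2 * hidden_size, hidden_size); the first
# hidden_size rows are the key projection, the remaining rows the value projection.
# A minimal sketch with an arbitrary size:
_hidden = 4
_kv = torch.randn(2 * _hidden, _hidden)
_k, _v = _kv[:_hidden, :], _kv[_hidden:, :]
assert _k.shape == _v.shape == (_hidden, _hidden)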
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
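# Example invocation (script name and paths are illustrative):
#
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti \
#       --model_name glpn-kitti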
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
],
"processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
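# For context: _LazyModule registers itself in sys.modules so heavy submodules are only
# imported when one of their attributes is first accessed. A stripped-down sketch of the
# idea (not the transformers implementation):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._class_to_module = {
            cls: module for module, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr: str):
        if attr not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)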
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range", )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", class_embed_type="timestep", mid_block_scale_factor=1.414, time_embedding_act_fn="gelu", time_embedding_dim=32, )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range", )
        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self._get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None
        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )
        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self._get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())
        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch) -> Dict[str, List[str]]:
        """A debugging utility: dump one batch in both token-id and decoded-text form."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch
    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids) -> List[str]:
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)
        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
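    # A sketch of the label-smoothed NLL used above, assuming the common fairseq-style
    # formulation (the actual implementation lives in utils.label_smoothed_nll_loss):
    # mix the NLL of the gold token with the mean NLL over the whole vocabulary.
    #
    #   def label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    #       # lprobs: (N, vocab) log-probabilities; target: (N,) gold token ids
    #       nll_loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    #       smooth_loss = -lprobs.sum(dim=-1)
    #       pad_mask = target.eq(ignore_index)
    #       nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
    #       smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
    #       eps_i = epsilon / lprobs.size(-1)
    #       loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    #       return loss.sum(), nll_loss.sum()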
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch) -> dict:
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"], attention_mask=batch["attention_mask"], use_cache=True, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, max_length=self.eval_max_length, )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds = self.ids_to_clean_text(generated_ids)
        target = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics
    def test_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def test_epoch_end(self, outputs) -> Dict:
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs, )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler, )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers, )
        else:
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None, )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length", default=1024, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument(
            "--max_target_length", default=56, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument(
            "--val_max_target_length", default=142, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument(
            "--test_max_target_length", default=142, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="Task to run, e.g. summarization or translation." )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None] )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience", type=int, default=-1, required=False, help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will affect it."
            ), )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)
    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False
    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model, args, logging_callback=Seq2SeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better), early_stopping_callback=es_callback, logger=logger, )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
SCREAMING_SNAKE_CASE__ = pl.Trainer.add_argparse_args(parser)
SCREAMING_SNAKE_CASE__ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
SCREAMING_SNAKE_CASE__ = parser.parse_args()
main(args)
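# Example invocation (paths and data layout are illustrative; most generic flags such as
# --model_name_or_path and --output_dir come from lightning_base.add_generic_args):
#
#   python finetune.py --data_dir ./cnn_dm --model_name_or_path t5-small \
#       --output_dir ./out --do_train --do_predict --n_val 500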
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro–Winkler similarity between two strings (1.0 means identical)."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
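# Two sanity checks implied directly by the formula above: identical strings score 1.0
# (jaro == 1, so the prefix bonus vanishes), and fully disjoint strings score 0.0.
assert abs(jaro_winkler("hello", "hello") - 1.0) < 1e-12
assert jaro_winkler("abc", "xyz") == 0.0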
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Decorator that flags a function as experimental: it warns on every call."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
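# Usage sketch: the wrapper only emits a warning; the wrapped function behaves as before.
@experimental
def new_feature(x: int) -> int:
    return x * 2


assert new_feature(3) == 6  # also emits: "'new_feature' is experimental ..."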
"""simple docstring"""
import re
import string
import numpy as np
import datasets
SCREAMING_SNAKE_CASE__ = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
SCREAMING_SNAKE_CASE__ = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
SCREAMING_SNAKE_CASE__ = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                } ), reference_urls=[], )

    def _compute(self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False, ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
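# Usage sketch: the functions above serve as torch.hub entry points, so each wrapper can
# be reached through torch.hub.load (checkpoint name below is illustrative):
#
#   import torch
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")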
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its position in the alphabet (a=1 ... z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Invert `encode`: map each 1..26 integer back to a lowercase letter."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
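# A quick round-trip check of the cipher above (safe to run at import time):
assert encode("hello") == [8, 5, 12, 12, 15]
assert decode(encode("hello")) == "hello"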
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
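# make_batched normalizes every accepted input shape to a list of videos, where each
# video is a list of frames. A quick check with a dummy numpy frame:
_frame = np.zeros((8, 8, 3), dtype=np.uint8)
assert len(make_batched(_frame)) == 1 and len(make_batched(_frame)[0]) == 1  # one video, one frame
assert len(make_batched([_frame, _frame])[0]) == 2  # list of frames -> one video, two frames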
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, offset=True, do_normalize=True, image_mean=None, image_std=None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, offset=True, data_format=None, **kwargs, ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, offset=None, do_normalize=None, image_mean=None, image_std=None, data_format=ChannelDimension.FIRST, ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, offset=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)